##// END OF EJS Templates
perf: make `perfrevlogindex` use the new `index.rev` api if available...
marmoute -
r43972:c314177e default
parent child Browse files
Show More
@@ -1,3844 +1,3857 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return *a* unchanged.

    Used as a no-op stand-in for pycompat conversion helpers
    (byteskwargs, fsencode, ...) on Mercurial versions that lack them.
    """
    return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel; distinguishes "attribute missing" from None


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (given as bytes)."""
    # getattr with a sentinel default never raises, unlike a bare hasattr
    # on properties whose getter can fail
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
182
182
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # Python 3.3+: monotonic, highest-resolution clock
    util.timer = time.perf_counter
elif os.name == b'nt':
    # on Windows time.clock historically had much better resolution
    # than time.time
    util.timer = time.clock
else:
    util.timer = time.time
192
192
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
# lookup order: cmdutil (modern) -> commands (older) -> empty list
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)
201
201
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
# lookup order: cmdutil (modern) -> commands (older) -> local fallback list
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the alias list of a command spec such as ``b"perf|p"``."""
    return list(cmd.split(b"|"))
228
228
229
229
# Pick the best available mechanism for registering commands, from
# newest to oldest Mercurial API.
if safehasattr(registrar, 'command'):
    # modern API (3.7+)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo= by appending aliases to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
# Declare the perf.* config items so newer Mercurial does not warn
# about unregistered config options; silently skip on versions that
# predate the registrar/configitems API.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # that version does not accept the 'experimental' keyword:
    # re-register every item without it
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335
335
336
336
def getlen(ui):
    """Return a length function honoring the perf.stub test config.

    When ``perf.stub`` is set, the returned callable always reports 1 so
    benchmarks iterate over a single element; otherwise it is ``len``.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    if not stubbed:
        return len

    def _one(x):
        return 1

    return _one
341
341
342
342
class noop(object):
    """Context manager that does nothing on entry or exit.

    Exceptions propagate normally because __exit__ returns a falsy value.
    """

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None
351
351
352
352
# shared do-nothing context manager, used wherever profiling is disabled
NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # always falsy, matching plainformatter's behavior
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<runcount>"; malformed entries are warned
    # about and skipped
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # only usable when the profiling module imported successfully
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """One-shot runner substituted for _timer when perf.stub is set.

    Runs the optional *setup* then *func* exactly once; nothing is timed
    or reported through *fm*.
    """
    has_setup = setup is not None
    if has_setup:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Time one run of the managed block.

    Yields a list that, after the block exits, holds one
    (wallclock, user-cpu, system-cpu) delta tuple.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() indices 0 and 1 are user and system CPU time of this process
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
497
497
498
498
# list of stop condition (elapsed time, minimal run count)
# i.e. stop after 3s once 100 runs were done, or after 10s once 3 runs
# were done, whichever comes first
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* and report timings through formatter *fm*.

    ``setup`` (if given) runs before every iteration and is not timed.
    ``limits`` is a sequence of (elapsed-seconds, min-run-count) stop
    conditions; iteration ends once any of them is satisfied.  ``prerun``
    warm-up runs execute first without being measured.  Only the first
    measured iteration runs under ``profiler``.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up runs: executed but never recorded
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first iteration is profiled; subsequent ones use the noop
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the return value of the last run; shown via fm as "result"
    formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary to formatter *fm*.

    *timings* is a list of (wall, user, system) tuples; it is sorted in
    place.  The best run is always shown; with *displayall* the max,
    average and median entries are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # every role except b'best' prefixes its field names with "<role>."
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        wall, user, system = entry[0], entry[1], entry[2]
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        averages = tuple(sum(col) / count for col in zip(*timings))
        display(b'avg', averages)
        display(b'median', timings[len(timings) // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read config *section.name* as an int, or *default* when unset.

    Raises error.ConfigError when the value is present but not an
    integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so that restore() can undo set()
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
635
635
636
636
# utilities to examine each internal API changes

639
639
def getbranchmapsubsettable():
    """Return the ``subsettable`` mapping wherever this hg version keeps it.

    For "historical portability", ``subsettable`` is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    For "historical portability":
    ``repo.svfs`` has been available since 2.3 (or 7034365089bf);
    older versions expose the same thing as ``repo.sopener``.
    """
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg

    For "historical portability":
    ``repo.vfs`` has been available since 2.3 (or 7034365089bf);
    older versions expose the same thing as ``repo.opener``.
    """
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
# utilities to clear cache

717
717
def clearfilecache(obj, attrname):
    """Drop the cached value of a ``@filecache``-managed property.

    Works on the unfiltered repo when ``obj`` supports ``unfiltered()``,
    removing both the instance attribute and the ``_filecache`` entry so
    the next access re-reads from disk.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    """Forget any in-memory changelog so the next access reloads it.

    For a filtered repoview, also reset the view-level cache key/value so
    the view does not hand back the stale changelog object.
    """
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
# perf commands

736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file ``f`` at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # bypass the ctx/status machinery and call the dirstate directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(len, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
797
797
798
798
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # scmutil.addremove grew a `uipathfn` argument at some point;
        # inspect the signature to stay compatible with both forms.
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
816
816
817
817
def clearcaches(cl):
    """Clear a revlog/changelog's lookup caches.

    Behave somewhat consistently across internal API changes: use the
    official ``clearcaches()`` when present, otherwise reset the legacy
    node-cache attributes by hand.
    """
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
828
828
829
829
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # drop lookup caches before each run so every run starts cold
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
845
845
846
846
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
869
869
870
870
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
883
883
884
884
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor-set membership tests for the given revset"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()
899
899
900
900
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # mutable container so setup() can rebuild the peer between runs
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
917
917
918
918
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
941
941
942
942
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # wrap `fn` so every run re-opens and re-parses the bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1067
1067
1068
1068
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1104
1104
1105
1105
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate `dirs` cache from scratch"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the propertycache so the next run rebuilds it
        del dirstate._map._dirs

    timer(d)
    fm.end()
1119
1119
1120
1120
@command(
    b'perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmap the time of various distate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point were a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # force the dirstate to be loaded before timing
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1183
1183
1184
1184
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before timing
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached `dirs` structure so each run rebuilds it
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1201
1201
1202
1202
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # one warm-up lookup outside of the timed section
    dirstate._map.filefoldmap.get(b'a')

    def drop_cache():
        del dirstate._map.filefoldmap

    def run():
        dirstate._map.filefoldmap.get(b'a')

    timer(run, setup=drop_cache)
    fm.end()
1222
1222
1223
1223
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # one warm-up lookup outside of the timed section
    dirstate._map.dirfoldmap.get(b'a')

    def drop_caches():
        # the dirfoldmap is derived from `_dirs`, so drop both
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def run():
        dirstate._map.dirfoldmap.get(b'a')

    timer(run, setup=drop_caches)
    fm.end()
1244
1244
1245
1245
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmap the time it take to write a dirstate on disk
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before any timing starts
    b"a" in ds

    def mark_dirty():
        # force the next write() to actually hit the disk
        ds._dirty = True

    def run():
        ds.write(repo.currenttransaction())

    timer(run, setup=mark_dirty)
    fm.end()
1263
1263
1264
1264
def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        wctx = repo[scmutil.revsingle(repo, opts[b'from'])]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        baserev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[baserev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1286
1286
1287
1287
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    # benchmark `merge.calculateupdates` between the revisions selected by
    # --rev/--from/--base (see _getmergerevs for the selection logic)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1319
1319
1320
1320
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1343
1343
1344
1344
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints up front; only pathcopies itself is timed
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(ctx1, ctx2)

    timer(run)
    fm.end()
1358
1358
1359
1359
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # also include the cost of re-reading the phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1382
1382
1383
1383
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # prefer the modern index api, fall back to the legacy nodemap
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE(review): `iteritems` is a Python 2 idiom — confirm this path is
    # exercised on Python 3 (pycompat.iteritems would be the portable form)
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def run():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(run)
    fm.end()
1442
1442
1443
1443
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` names a changeset; derive its manifest node from the context
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full hexadecimal manifest node id
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older hg versions expose the revlog directly
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def run():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(run)
    fm.end()
1487
1487
1488
1488
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # benchmark reading one changeset entry from the changelog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1501
1501
1502
1502
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setup_load():
        # drop any cached ignore matcher so each run re-parses .hgignore
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def run_load():
        dirstate._ignore

    timer(run_load, setup=setup_load, title=b"load")
    fm.end()
1519
1519
1520
1520
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: after _byteskwargs all keys are bytes, so the option must be
        # read as opts[b'rev'] (opts['rev'] raised KeyError on Python 3);
        # the Abort message is bytes for consistency with the rest of file
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1582
1582
1583
1583
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: after _byteskwargs all keys are bytes; opts['clear_caches']
    # raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # bytes message for consistency with the other Abort calls in file
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1654
1654
1655
1655
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # benchmark the startup cost of a bare `hg version` invocation
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        if os.name != 'nt':
            # empty HGRCPATH avoids timing the user's config parsing
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
1672
1672
1673
1673
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1699
1699
1700
1700
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # benchmark fetching the file list of changeset `x` through the context
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[x].files())

    timer(run)
    fm.end()
1712
1712
1713
1713
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # benchmark fetching the file list of changeset `x` straight from the
    # changelog entry (index 3 of the raw read tuple)
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        len(cl.read(x)[3])

    timer(run)
    fm.end()
1726
1726
1727
1727
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # benchmark a full `repo.lookup` of a revision identifier
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1734
1734
1735
1735
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    # benchmark applying a deterministic pseudo-random sequence of hunk
    # replacements to a fresh linelog
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)  # fixed seed keeps runs comparable
    randint = random.randint
    currentlines = 0
    replacements = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        replacements.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in replacements:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
1773
1773
1774
1774
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # benchmark resolving one or more revset specs into a revision range
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1782
1782
1783
1783
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    # benchmark a cold node -> rev lookup on a freshly-opened changelog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def run():
        cl.rev(n)
        # drop the caches so every iteration starts cold
        clearcaches(cl)

    timer(run)
    fm.end()
1800
1800
1801
1801
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark the `hg log` command, optionally following renames

    Output is swallowed via a ui buffer so only the log machinery
    (not terminal I/O) is measured.
    """
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
1819
1819
1820
1820
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1837
1837
1838
1838
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a silenced ui so terminal output does not skew the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1878
1878
1879
1879
def _displaystats(ui, opts, entries, data):
    """display percentile statistics for each measured data series

    `entries` is a list of `(key, title)` pairs and `data` maps each key
    to a list of tuples whose first element is the measured value (the
    remaining elements identify the sample and are ignored here).
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # percentile indices must be based on the number of samples in this
        # series, not the number of series (len(data) would pin them to 0)
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1925
1925
1926
1926
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the timing/rename columns when they will not be filled in
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits have a (base, p1, p2) triplet worth measuring
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # bugfix: take the end timestamp BEFORE computing the delta;
                # previously p2.time reused the stale `end` from the p1 run.
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2108
2108
2109
2109
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # copy tracing is only interesting across merges
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace between this base and parent
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2235
2235
2236
2236
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
2243
2243
2244
2244
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store

    def d():
        s.fncache._load()

    timer(d)
    fm.end()
2256
2256
2257
2257
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file

    Runs inside a repo lock and a transaction; the fncache is marked dirty
    before each write so a real write is performed on every iteration.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    # back up the on-disk fncache so the benchmark leaves the repo intact
    tr.addbackup(b'fncache')

    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()
2276
2276
2277
2277
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark store-path encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()

    def d():
        for p in s.fncache.entries:
            s.encode(p)

    timer(d)
    fm.end()
2291
2291
2292
2292
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for threaded bdiff benchmarking

    Pulls (text, text) pairs from queue `q` and diffs them with the
    algorithm selected by the `xdiff`/`blocks` flags.  A `None` item marks
    the end of a batch: the worker then parks on the `ready` condition
    until the coordinator wakes it for the next batch, and exits once the
    `done` event is set.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2308
2308
2309
2309
def _manifestrevision(repo, mnode):
    """return the full text of manifest node `mnode`

    Uses the modern `manifestlog.getstorage` API when available and falls
    back to the private `_revlog` attribute on older Mercurial versions.
    """
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
2319
2319
2320
2320
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    # --alldata implies reading from the changelog.
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    rl = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = rl.rev(rl.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(rl) - 1)):
        if opts[b'alldata']:
            # Gather (parent manifest, manifest) text pairs for the
            # changeset ...
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # ... then filelog text pairs, found by walking the manifest
            # delta against p1.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            deltaparent = rl.deltaparent(rev)
            textpairs.append((rl.revision(deltaparent), rl.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        # Single threaded: the diff kernel is loop invariant, so select it
        # once up front.
        if xdiff:
            difffn = mdiff.bdiff.xdiffblocks
        elif blocks:
            difffn = mdiff.bdiff.blocks
        else:
            difffn = mdiff.textdiff

        def d():
            for pair in textpairs:
                difffn(*pair)

    else:
        # Multi threaded: feed text pairs to worker threads via a queue.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            # One sentinel per worker marks the end of this run.
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Tear the worker threads down.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2435
2435
2436
2436
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    # --alldata implies reading from the changelog.
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    rl = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = rl.rev(rl.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(rl) - 1)):
        if opts[b'alldata']:
            # Gather (parent manifest, manifest) text pairs for the
            # changeset ...
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # ... then filelog text pairs, found by walking the manifest
            # delta against p1.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            deltaparent = rl.deltaparent(rev)
            textpairs.append((rl.revision(deltaparent), rl.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2515
2515
2516
2516
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Map single-letter flags to the corresponding diff keyword argument.
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # Benchmark each whitespace-option combination separately.
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = {options[c]: b'1' for c in diffopt}

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % ((b'-' + diffopt) if diffopt else b'none')
        timer(d, title=title)
    fm.end()
2540
2540
2541
2541
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # Sample nodes at various depths of the revlog.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def nodelookup():
        """Parse a fresh index and return its ``node -> rev`` callable.

        Prefers the new ``index.rev`` API when available, falling back to
        ``nodemap.__getitem__``. Returns None when neither exists (the
        pure Python index has no lookup API).

        This deduplicates setup code that was previously copy/pasted into
        both resolvenode() and resolvenodes(), and reuses the index we
        just parsed instead of redundantly parsing a second one for the
        nodemap fallback.
        """
        index = revlogio.parseindex(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(index, 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return None
            rev = nodemap.__getitem__
        return rev

    def resolvenode(node):
        rev = nodelookup()
        if rev is None:
            return

        try:
            rev(node)
        except error.RevlogError:
            # Missing nodes are expected (e.g. the "missing node" bench).
            pass

    def resolvenodes(nodes, count=1):
        rev = nodelookup()
        if rev is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2662
2675
2663
2676
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # A negative start revision counts back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        first = startrev
        last = rllen
        if reverse:
            first, last = last - 1, first - 1
            step = -step

        for revnum in _xrange(first, last, step):
            # Old revisions don't support passing int.
            node = rl.node(revnum)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2712
2725
2713
2726
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalide run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # Each entry becomes (rev, [timing-from-pass-1, timing-from-pass-2, ...]).
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: previously computed `resultcount * 70 // 100`, which
        # reported the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2855
2868
2856
2869
2857 class _faketr(object):
2870 class _faketr(object):
2858 def add(s, x, y, z=None):
2871 def add(s, x, y, z=None):
2859 return None
2872 return None
2860
2873
2861
2874
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions of ``orig`` into a temporary revlog, timing each
    ``addrawrevision`` call.

    Returns a list of ``(rev, timing)`` pairs in replay order.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx

        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for seen, rev in enumerate(revs):
            updateprogress(seen)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # Only the addrawrevision call itself is measured.
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2911
2924
2912
2925
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair used to re-add ``rev``.

    ``source`` selects how the revision content is supplied to
    ``addrawrevision``: as a fulltext (b'full'), as a delta against one of
    the parents (b'parent-1', b'parent-2', b'parent-smallest'), or as the
    delta currently stored in the revlog (b'storage'). Any other value
    yields neither text nor cached delta.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        # seed with the raw fulltext, no precomputed delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        best_parent, best_diff = p1, p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # strictly smaller only: ties keep p1, matching historical
            # behavior
            if len(p1diff) > len(p2diff):
                best_parent, best_diff = p2, p2diff
        cachedelta = (orig.rev(best_parent), best_diff)
    elif source == b'storage':
        # reuse whatever delta base the revlog already stores
        storedbase = orig.deltaparent(rev)
        cachedelta = (storedbase, orig.revdiff(orig.node(storedbase), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2953
2966
2954
2967
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a temporary copy of revlog ``orig``.

    The index and data files are copied into a temp directory and then
    truncated so that everything from ``truncaterev`` onward is removed,
    leaving a revlog ready to have those revisions re-added. The temp
    directory is deleted on exit. Inline revlogs are rejected.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # forward 'upperboundcomp' when this Mercurial version supports it
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is orig._io.size bytes, so this keeps
            # exactly the first `truncaterev` entries
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            # orig.start(rev) is the data-file offset of `truncaterev`
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # best-effort cleanup (ignore_errors=True)
        shutil.rmtree(tmpdir, True)
3005
3018
3006
3019
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit request: benchmark every usable engine
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    # probe that the engine can actually compress
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the file that holds the chunk data
        # (the index file for inline revlogs, the data file otherwise)
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, fresh file handle each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing a single file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read spanning all revisions
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress each chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks captured by dochunkbatch with `compressor`
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3134
3147
3135
3148
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        """slice the raw segments in `data` into one buffer per revision"""
        # hoist attribute lookups out of the loop
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                # zero-copy view into the segment
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): iterates `slicedchain` from the enclosing scope,
        # not the `chain` parameter — presumably intentional, but confirm.
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved modules over time; fall back for older versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase needs so the timers measure only
    # that phase's work
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # chain slicing only exists with sparse-read support
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3277
3290
3278
3291
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold filtered and obsolescence related data."""
    # NOTE: the docstring previously said ``--clean``, but the registered
    # option (see the @command table above) is ``-C``/``--clear``.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop filtered/obsolete caches so they are rebuilt each run
            repo.invalidatevolatilesets()
        if contexts:
            # exercise full changectx creation for each revision
            for ctx in repo.set(expr):
                pass
        else:
            # plain revision-number iteration
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3310
3323
3311
3324
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence.
    With no ``names`` arguments, every known obsolescence set and every
    repoview filter is benchmarked; otherwise only the named ones are."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # work on the unfiltered repo so filters are computed from scratch
    repo = repo.unfiltered()

    def getobs(name):
        # build a benchmark callable for the obsolescence set `name`
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a benchmark callable for the repoview filter `name`
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
3357
3370
3358
3371
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        # locate the per-filter cache mapping, across Mercurial versions
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the subset must be rebuilt
                view._branchcaches.clear()
            else:
                # drop only this filter's entry; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # topologically order filters so each one's subset is processed first
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so only the in-memory update
    # is measured; handle both new (fromfile) and old (read) APIs
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # always restore the patched read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3448
3461
3449
3462
@command(
    b'perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # Two throwaway repoview filters exposing exactly the requested subsets,
    # so the branchmap code sees "base" and "target" as ordinary filtered
    # repositories.
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found: build the base branchmap from
            # scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

    def setup():
        # start each run from a fresh copy of the base branchmap
        x[0] = base.copy()
        if clearcaches:
            unfi._revbranchcache = None
            clearchangelog(repo)

    def bench():
        x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the temporary repoview filters
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3558
3571
3559
3572
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix: "brachmap" -> "branchmap" in the user-facing help text
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With `--list`, only print the on-disk branchmap cache files and their
    sizes instead of benchmarking.  With `--clear-revlogs`, the changelog is
    refreshed before each run.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the cached branchmap files ("branch2*") and exit
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until we find a filter level with a
        # cached branchmap we can actually read
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3618
3631
3619
3632
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # parsing happens on construction; the length is the reported result
        return len(obsolete.obsstore(svfs))

    timer(loadmarkers)
    fm.end()
3629
3642
3630
3643
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark `util.lrucachedict` operations (init, gets, sets, mixed)

    When ``costlimit`` is non-zero, the cost-aware variants of the
    benchmarks are run instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # Random keys are generated up-front so every benchmark closure works on
    # the same pre-built sequences and no RNG time is measured.
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # cost-aware variant: items may be evicted by cost, so lookups can
        # legitimately miss (hence the KeyError handling)
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # same as doinserts but through __setitem__ instead of insert()
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        # op 0 = get, op 1 = set; ratio controlled by --mixedgetfreq
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain benchmarks are mutually exclusive
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
    # NOTE(review): only the formatter of the last iteration is finalized
    # here; earlier formatters from the loop are dropped without end().
    fm.end()
3785
3798
3786
3799
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def dowrites():
        # emit the same fixed line many times; only ui.write throughput is
        # being measured
        for _ in range(100000):
            ui.writenoi18n(b'Testing write performance\n')

    timer(dowrites)
    fm.end()
3801
3814
3802
3815
def uisetup(ui):
    """extension setup: add compatibility shims for old Mercurial versions

    Wraps ``cmdutil.openrevlog`` so that passing ``--dir`` on a Mercurial
    version that cannot honor it aborts with a clear message instead of
    failing obscurely.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3821
3834
3822
3835
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runprogress():
        # drive a progress bar through `total` increments; the context
        # manager handles completion/cleanup
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(runprogress)
    fm.end()
3844 fm.end()
3857 fm.end()
General Comments 0
You need to be logged in to leave comments. Login now