##// END OF EJS Templates
perf: fix list formatting in perfindex documentation...
marmoute -
r44732:216fc463 default
parent child Browse files
Show More
@@ -1,3853 +1,3854 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return its argument unchanged.

    Used as a no-op stand-in for pycompat conversion helpers (e.g.
    byteskwargs/fsencode) on Mercurial versions where they don't exist.
    """
    return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (given as bytes).

    Uses a private sentinel default so that a falsy attribute value still
    counts as "present"; any AttributeError from lookup reads as absent.
    """
    missing = _undefined
    return getattr(thing, _sysstr(attr), missing) is not missing
179
179
180
180
# Install our safehasattr onto util so the rest of this file (and code it
# calls) can rely on it even on Mercurial < 1.9.3.
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # time.perf_counter exists on Python 3.3+, so the branches below are in
    # practice only reached on Python 2.
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str; comparing against b'nt' only
    # matches on Python 2 (where bytes is str). That is harmless here since
    # Python 3 always takes the perf_counter branch above — confirm.
    util.timer = time.clock
else:
    util.timer = time.time
192
192
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
# The fallback list mirrors the historical debugrevlogopts entries:
# (shortflag, longflag, default, help).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b"name|alias1|alias2" command spec into its name+alias list.

    Local re-implementation of cmdutil.parsealiases, which only exists
    since Mercurial 1.5 (or 6252852b4332).
    """
    return cmd.split(b'|')
228
228
229
229
# Pick the best available implementation of the @command registration
# decorator, oldest-compatible last.
if safehasattr(registrar, 'command'):
    # modern location (registrar.command, since 3.7)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo= by appending the command aliases to
            # commands.norepo, which is how pre-3.1 hg tracked them
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # cmdtable entries are (func, options[, synopsis]) tuples
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
# Register perf's config items with Mercurial's configitem registrar when it
# exists; silently skip on versions that predate it.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    # registrar/configitems not available on this hg version: run unregistered
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (that change removed the experimental= keyword; re-register everything
    # without it)
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335
335
336
336
def getlen(ui):
    """Return a length function for benchmark sizing.

    When the experimental perf.stub config is set the returned callable
    pretends every collection has one element, so stub runs stay cheap;
    otherwise the builtin len is returned.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda seq: 1) if stubbed else len
341
341
342
342
class noop(object):
    """Context manager that does nothing on enter or exit.

    Stands in for a profiler when profiling is disabled; never suppresses
    exceptions.
    """

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None
351
351
352
352
353 NOOPCTX = noop()
353 NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # formatter is "falsy" to signal plain (non-templated) output
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # parse each "<time>-<numberofrun>" entry; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        # no (valid) user-provided limits: fall back to the defaults
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # profiler stays None when the profiling module is unavailable
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    # experimental config: perf.pre-run (warm-up iterations before timing)
    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once, with no timing or reporting.

    Substituted for _timer when perf.stub is set; *fm* and *title* are
    accepted only for signature compatibility and ignored.
    """
    if setup is not None:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Time one block, yielding a list that receives a single sample.

    After the with-block exits, the yielded list holds one
    (wall, user, sys) tuple of elapsed seconds.
    """
    sample = []
    proc_before = os.times()
    wall_before = util.timer()
    yield sample
    wall_after = util.timer()
    proc_after = os.times()
    sample.append(
        (
            wall_after - wall_before,
            proc_after[0] - proc_before[0],
            proc_after[1] - proc_before[1],
        )
    )
497
497
498
498
# list of stop condition (elapsed time, minimal run count)
# Each pair means: once the benchmark has run for at least that many seconds
# AND completed at least that many iterations, _timer may stop. Pairs are
# checked in order; this mirrors the documented perf.run-limits default of
# `3.0-100, 10.0-3`.
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* and report its timings via formatter *fm*.

    *setup*, when given, runs before every invocation of *func* (including
    warm-up runs). *limits* is a sequence of (elapsed-seconds, min-count)
    stop conditions; the loop ends once any condition is fully met.
    *prerun* warm-up iterations are executed untimed. *profiler*, when
    given, wraps only the first measured iteration.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs: executed but never timed or recorded
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first measured iteration is profiled; swap in the no-op
        # context for all subsequent runs
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the return value of the final run; passed through for display
    formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place. Always reports the best sample; with *displayall* also reports
    max, average and median.
    """

    nruns = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, sample):
        # fields for the best sample carry no prefix; other roles are
        # namespaced as e.g. "max.wall"
        prefix = b'%s.' % role if role != b'best' else b''
        wall, user, system = sample[0], sample[1], sample[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, nruns)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        averages = tuple([sum(col) / nruns for col in zip(*timings)])
        _show(b'avg', averages)
        _show(b'median', timings[nruns // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read config value section.name as an int, or *default* when unset.

    Raises error.ConfigError when the value exists but is not an integer.
    Re-implemented locally because ui.configint only exists since
    Mercurial 1.9 (or fa2b596db182).
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Return a helper to set and later restore attribute 'name' of 'obj'.

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime, so that silent removal of an attribute a benchmark relies
    on is noticed instead of skewing measurements.

    The returned helper can (1) assign a new value to the attribute and
    (2) restore the original value.

    If 'ignoremissing' is true, a missing 'name' attribute returns None
    instead of aborting; useful for attributes that exist only in some
    Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # closure over obj/origvalue; set() overrides, restore() undoes
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
def getbranchmapsubsettable():
    """Locate the branchmap 'subsettable' mapping across hg versions."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    # fall back to the pre-2.3 'sopener' attribute when svfs is absent
    return svfs if svfs else getattr(repo, 'sopener')
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    # fall back to the pre-2.3 'opener' attribute when vfs is absent
    return vfs if vfs else getattr(repo, 'opener')
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
def clearfilecache(obj, attrname):
    """Drop a filecache'd property 'attrname' from 'obj' if present.

    Operates on the unfiltered view when 'obj' is a (possibly filtered)
    repository, and is a no-op for attributes not currently cached.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    """Invalidate any cached changelog on 'repo' and its unfiltered view."""
    if repo is not repo.unfiltered():
        # filtered repos shadow the cache key/value; reset them directly
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walkcount():
        # materialize the generator so every entry is actually produced
        entries = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(entries))

    timer(walkcount)
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # annotate file 'f' as of the working directory parent
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # exercise the low-level dirstate.status() call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
797
797
798
798
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        # silence the (potentially long) addremove listing during timing
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # modern signature takes an explicit uipathfn
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
816
816
817
817
def clearcaches(cl):
    """Clear a changelog's lookup caches across internal API changes."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
828
828
829
829
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def reset():
        # start each run from cold changelog caches
        clearcaches(cl)

    def bench():
        len(cl.headrevs())

    timer(bench, setup=reset)
    fm.end()
845
845
846
846
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def reset():
        if clearrevlogs:
            # optionally force the changelog/manifest to be reloaded too
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def bench():
        return len(repo.tags())

    timer(bench, setup=reset)
    fm.end()
869
869
870
870
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def consume():
        # walk the full ancestor set of all heads
        for _ancestor in repo.changelog.ancestors(heads):
            pass

    timer(consume)
    fm.end()
883
883
884
884
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def membership():
        # time lazy-membership tests against a fresh ancestor set
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(membership)
    fm.end()
899
899
900
900
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # mutable holder so setup() can refresh the peer between runs
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def connect():
        repos[1] = hg.peer(ui, opts, path)

    def discover():
        setdiscovery.findcommonheads(ui, *repos)

    timer(discover, setup=connect)
    fm.end()
917
917
918
918
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def reset():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def bench():
        repo._bookmarks

    timer(bench, setup=reset)
    fm.end()
941
941
942
942
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # open the bundle fresh each run and hand it whole to fn
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # drain the bundle stream in fixed-size reads
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle parsing
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1067
1067
1068
1068
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        # exhaust the chunk generator so the work actually happens
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
1104
1104
1105
1105
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before timing
    b'a' in dirstate

    def bench():
        dirstate.hasdir(b'a')
        # drop the dirs cache so the next run rebuilds it
        del dirstate._map._dirs

    timer(bench)
    fm.end()
1119
1119
1120
1120
@command(
    b'perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmap the time of various distate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point were a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before timing
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def bench():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def bench():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def bench():
            b"a" in repo.dirstate

    timer(bench, setup=setup)
    fm.end()
1183
1183
1184
1184
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirs structure once before timing
    repo.dirstate.hasdir(b"a")

    def reset():
        # drop the cached dirs structure so each run rebuilds it
        del repo.dirstate._map._dirs

    def bench():
        repo.dirstate.hasdir(b"a")

    timer(bench, setup=reset)
    fm.end()
1201
1201
1202
1202
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    # (docstring typo fixed: "benchmap" -> "benchmark")
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything except the cache under test
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached filefoldmap so each run rebuilds it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1222
1222
1223
1223
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    # (docstring typo fixed: "benchmap" -> "benchmark")
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything except the caches under test
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the dirfoldmap and the dirs map it is derived from
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1244
1244
1245
1245
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    # (docstring grammar fixed: "benchmap the time it take" ->
    # "benchmark the time it takes")
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate so the read is not part of the measurement
    b"a" in ds

    def setup():
        # mark the dirstate dirty so write() actually writes
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1263
1263
1264
1264
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    # (docstring typos fixed: "dictionnary" -> "dictionary", "bse" -> "base")
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1286
1286
1287
1287
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote=True keeps prompts out of the timed section
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1319
1319
1320
1320
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # time the copy-tracing step of a merge between the resolved revs
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1343
1343
1344
1344
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints once, outside the timed section
    srcctx = scmutil.revsingle(repo, rev1, rev1)
    dstctx = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(srcctx, dstctx)

    timer(run)
    fm.end()
1358
1358
1359
1359
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # also pay the cost of re-reading phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1382
1382
1383
1383
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # resolve the destination the same way `hg push` would
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, outside the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # older index implementations lack `has_node`; fall back to the
    # nodemap's __contains__ in that case
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        # count remote roots we know locally that are non-public
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        # the benchmarked call: summarize remote phases against the
        # local repository
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1442
1442
1443
1443
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` names a changeset: resolve it and take its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex manifest node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # older hg exposes the manifest revlog differently
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # drop in-memory (and optionally on-disk) caches so every read
        # starts cold
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1487
1487
1488
1488
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve the revision once; only the changelog read is timed
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1501
1501
1502
1502
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def prepare():
        # force the ignore matcher to be rebuilt on every run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        dirstate._ignore

    timer(load, setup=prepare, title=b"load")
    fm.end()
1519
1519
1520
1520
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: after _byteskwargs() all keys are bytes; the former str
        # key lookup (opts['rev']) raised KeyError on Python 3
        if opts[b'rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1582
1583
1583
1584
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    # (docstring fix: blank line added before the bullet list so it
    # renders as a proper RST list, matching perfindex)
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: after _byteskwargs() all keys are bytes; the former str key
    # lookup (opts['clear_caches']) raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1654
1655
1655
1656
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        # spawn a fresh `hg version` with an empty HGRCPATH and let the
        # shell discard its output
        hgcmd = sys.argv[0]
        if os.name == 'nt':
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % hgcmd)
        else:
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(hgcmd)
            )

    timer(run)
    fm.end()
1672
1673
1673
1674
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the node ids up front; only parents() is timed
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1699
1700
1700
1701
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        # time the computation of the file list of one changeset
        len(repo[rev].files())

    timer(run)
    fm.end()
1712
1713
1713
1714
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # entry 3 of a raw changelog record is its file list
        len(cl.read(rev)[3])

    timer(run)
    fm.end()
1726
1727
1727
1728
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        # resolve the user-supplied revision to a node
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
1734
1735
1735
1736
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    editcount = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the generated edit stream is identical on every run
    random.seed(0)
    randint = random.randint
    currentlines = 0
    editargs = []
    for rev in _xrange(editcount):
        # pick a random source hunk within the current file...
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        # ...and a random replacement hunk
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        editargs.append((rev, a1, a2, b1, b2))

    def replay():
        ll = linelog.linelog()
        for args in editargs:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(replay)
    fm.end()
1773
1774
1774
1775
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    resolve = scmutil.revrange

    def run():
        # resolve all revsets given on the command line
        return len(resolve(repo, specs))

    timer(run)
    fm.end()
1782
1783
1783
1784
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def run():
        cl.rev(node)
        # drop the revlog caches so the next run starts cold again
        clearcaches(cl)

    timer(run)
    fm.end()
1800
1801
1801
1802
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    # benchmark a full `hg log` run, optionally asking it to follow renames
    opts = _byteskwargs(opts)
    revs = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    # swallow the log output so terminal I/O is not part of the measurement
    ui.pushbuffer()

    def runlog():
        commands.log(
            ui, repo, rev=revs, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(runlog)
    ui.popbuffer()
    fm.end()
1819
1820
1820
1821
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkback():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # .branch() forces the changelog entry itself to be read,
            # not just the index
            repo[rev].branch()

    timer(walkback)
    fm.end()
1837
1838
1838
1839
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render on a copied ui whose output goes to os.devnull so that terminal
    # and pager costs do not pollute the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    try:
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # the devnull handle used to be leaked; make sure it is closed
        nullui.fout.close()
1878
1879
1879
1880
1880 def _displaystats(ui, opts, entries, data):
1881 def _displaystats(ui, opts, entries, data):
1881 # use a second formatter because the data are quite different, not sure
1882 # use a second formatter because the data are quite different, not sure
1882 # how it flies with the templater.
1883 # how it flies with the templater.
1883 fm = ui.formatter(b'perf-stats', opts)
1884 fm = ui.formatter(b'perf-stats', opts)
1884 for key, title in entries:
1885 for key, title in entries:
1885 values = data[key]
1886 values = data[key]
1886 nbvalues = len(data)
1887 nbvalues = len(data)
1887 values.sort()
1888 values.sort()
1888 stats = {
1889 stats = {
1889 'key': key,
1890 'key': key,
1890 'title': title,
1891 'title': title,
1891 'nbitems': len(values),
1892 'nbitems': len(values),
1892 'min': values[0][0],
1893 'min': values[0][0],
1893 '10%': values[(nbvalues * 10) // 100][0],
1894 '10%': values[(nbvalues * 10) // 100][0],
1894 '25%': values[(nbvalues * 25) // 100][0],
1895 '25%': values[(nbvalues * 25) // 100][0],
1895 '50%': values[(nbvalues * 50) // 100][0],
1896 '50%': values[(nbvalues * 50) // 100][0],
1896 '75%': values[(nbvalues * 75) // 100][0],
1897 '75%': values[(nbvalues * 75) // 100][0],
1897 '80%': values[(nbvalues * 80) // 100][0],
1898 '80%': values[(nbvalues * 80) // 100][0],
1898 '85%': values[(nbvalues * 85) // 100][0],
1899 '85%': values[(nbvalues * 85) // 100][0],
1899 '90%': values[(nbvalues * 90) // 100][0],
1900 '90%': values[(nbvalues * 90) // 100][0],
1900 '95%': values[(nbvalues * 95) // 100][0],
1901 '95%': values[(nbvalues * 95) // 100][0],
1901 '99%': values[(nbvalues * 99) // 100][0],
1902 '99%': values[(nbvalues * 99) // 100][0],
1902 'max': values[-1][0],
1903 'max': values[-1][0],
1903 }
1904 }
1904 fm.startitem()
1905 fm.startitem()
1905 fm.data(**stats)
1906 fm.data(**stats)
1906 # make node pretty for the human output
1907 # make node pretty for the human output
1907 fm.plain('### %s (%d items)\n' % (title, len(values)))
1908 fm.plain('### %s (%d items)\n' % (title, len(values)))
1908 lines = [
1909 lines = [
1909 'min',
1910 'min',
1910 '10%',
1911 '10%',
1911 '25%',
1912 '25%',
1912 '50%',
1913 '50%',
1913 '75%',
1914 '75%',
1914 '80%',
1915 '80%',
1915 '85%',
1916 '85%',
1916 '90%',
1917 '90%',
1917 '95%',
1918 '95%',
1918 '99%',
1919 '99%',
1919 'max',
1920 'max',
1920 ]
1921 ]
1921 for l in lines:
1922 for l in lines:
1922 fm.plain('%s: %s\n' % (l, stats[l]))
1923 fm.plain('%s: %s\n' % (l, stats[l]))
1923 fm.end()
1924 fm.end()
1924
1925
1925
1926
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, printf-style format pulling from the per-triplet dict)
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename/time columns are never filled in,
        # so drop them from the table
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for _displaystats; only the timing-dependent ones are
        # created when --timing is set
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # restrict to actual merge revisions within the requested set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # NOTE(review): this dict is built with bytes keys but read back
            # below with str keys (e.g. data['p1.nbrevs']) and expanded with
            # fm.data(**data) — on Python 3 that mix looks broken; confirm
            # which interpreter this path is exercised under.
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2107
2108
2108
2109
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the output table gains rename-count and time columns when --timing is set
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions are interesting: each (base, parent) pair of a
    # merge is a candidate copy-tracing workload
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no file appeared between base and parent: nothing for
                    # copy tracing to do, skip the pair
                    continue
                # NOTE(review): bytes keys here but str-key reads below
                # (data['nbrevs']) and fm.data(**data) — on Python 3 this mix
                # looks broken; confirm which interpreter exercises this path.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2231
2232
2232
2233
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # benchmark constructing a case collision auditor over the dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2239
2240
2240
2241
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # benchmark reading and parsing the fncache file
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
2252
2253
2253
2254
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # benchmark rewriting the fncache file inside a transaction; the
    # transaction backup keeps the on-disk file intact afterwards
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def write():
        # force the dirty flag so write() actually serializes every time
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(write)
    tr.close()
    lock.release()
    fm.end()
2272
2273
2273
2274
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # benchmark running the store path encoding over every fncache entry
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()  # load entries up front, outside the timed region

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2287
2288
2288
2289
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker thread loop: pull text pairs off ``q`` and diff them until a
    # ``None`` sentinel arrives, then park on ``ready`` until woken again
    # (or until ``done`` is set).
    while not done.is_set():
        # iter(q.get, None) keeps calling q.get() until it returns the
        # None sentinel
        for pair in iter(q.get, None):
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
        q.task_done()  # account for the sentinel itself
        with ready:
            ready.wait()
2304
2305
2305
2306
def _manifestrevision(repo, mnode):
    # Return the stored revision text for manifest node ``mnode``,
    # accommodating both the getstorage() API and the older _revlog
    # attribute on the manifest log.
    mlog = repo.manifestlog

    if util.safehasattr(mlog, b'getstorage'):
        storage = mlog.getstorage(b'')
    else:
        storage = mlog._revlog

    return storage.revision(mnode)
2315
2316
2316
2317
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    # --xdiff only changes which block-diff implementation runs, so it is
    # meaningless without --blocks.
    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # For changelog/manifest mode the positional FILE argument is actually
    # the revision; shuffle arguments accordingly.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # Pairs of (old text, new text) to diff; collected up front so the timed
    # function only measures diffing, not revision retrieval.
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # Default mode: diff each revision against its delta parent.
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # Single-threaded benchmark body: run the selected diff
            # implementation over every prepared text pair.
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # Multi-threaded mode: workers block on a shared queue; a None item
        # is the per-worker "round finished" marker (see _bdiffworker).
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        # Wait for every worker to consume its initial None so thread
        # startup cost is excluded from the measurement.
        q.join()

        def d():
            # Timed body: feed all pairs plus one terminating None per
            # worker, wake the workers, then wait for the queue to drain.
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Tell the workers to exit and wake them one final time.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2431
2432
2432
2433
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # In changelog/manifest mode the positional FILE argument is really the
    # revision; shuffle arguments accordingly.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # Pairs of (left text, right text); gathered before timing so revision
    # retrieval does not pollute the diff measurement.
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # Default mode: diff each revision against its delta parent.
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2511
2512
2512
2513
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # Map each single-letter diff flag to its internal option name.
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # Benchmark the plain diff plus each whitespace-handling variant.
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = {flagnames[c]: b'1' for c in diffopt}

        def d():
            # Diff against the working directory, discarding the output.
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
2536
2537
2537
2538
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    # Raw index bytes; every parse below works on this one buffer.
    data = opener.read(indexfile)

    # First 4 bytes: flags in the high 16 bits, format version in the low 16.
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        # Bit 16 of the header flags marks an inline revlog (data
        # interleaved with the index).
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # Sample nodes at fixed points through the revlog so lookup cost can be
    # compared at different depths.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # Cost of instantiating a revlog object.
        revlog.revlog(opener, indexfile)

    def read():
        # Raw I/O cost of reading the index file.
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # Cost of parsing the already-read index bytes.
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        # Parse plus a single entry lookup.
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        # Parse once, then fetch every requested entry ``count`` times
        # (repetition exposes caching effects).
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # Node -> rev resolution for a single node, via index.rev when
        # available, otherwise via the C nodemap.
        index = revlogio.parseindex(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                revlogio.parseindex(data, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        # Node -> rev resolution for many nodes, ``count`` passes.
        index = revlogio.parseindex(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                revlogio.parseindex(data, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    # (benchmark callable, display title) pairs, timed one after another.
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2671
2672
2672
2673
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # A negative start revision counts back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # Drop cached state so every run pays the full read cost.
        rl.clearcaches()

        first = startrev
        stop = rllen
        step = opts[b'dist']

        if reverse:
            # Walk from tip-wards back past the start revision.
            first, stop = stop - 1, first - 1
            step = -1 * step

        for cur in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            node = rl.node(cur)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2721
2722
2722
2723
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative boundaries count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # Fixed typo in error message: was 'invalide run count'.
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        # One full write pass over [startrev, stoprev]; each pass yields a
        # list of (rev, timing) tuples.
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # Zip the per-pass lists into [(rev, [t_pass1, t_pass2, ...]), ...].
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # Fixed: the 50th percentile was previously computed with
        # ``resultcount * 70 // 100``, which reported the 70% mark.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2864
2865
2865
2866
2866 class _faketr(object):
2867 class _faketr(object):
2867 def add(s, x, y, z=None):
2868 def add(s, x, y, z=None):
2868 return None
2869 return None
2869
2870
2870
2871
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Perform one write pass for ``perfrevlogwrite``.

    Replays revisions ``startrev``..``stoprev`` of revlog ``orig`` into a
    temporary revlog, timing each ``addrawrevision`` call individually.
    Returns a list of ``(rev, timing)`` tuples, where ``timing`` is the
    triple produced by ``timeone()``.
    """
    timings = []
    # Writes need a transaction object; the fake one records nothing.
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:
            # Legacy ui.progress() API: completion is signalled by pos=None.

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # Build the (args, kwargs) for addrawrevision according to the
            # requested delta source; done outside the timed section.
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # Only the actual write is timed.
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2920
2921
2921
2922
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair needed to re-add ``rev``.

    The returned tuple matches the ``addrawrevision`` calling convention.
    ``source`` selects how the revision content is supplied: as a full
    text (b'full') or as a cached delta against one of several possible
    base revisions.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        # Provide the fulltext; the revlog computes its own delta.
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to the first parent when there is no second one.
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # Delta against whichever parent yields the smaller diff;
        # ties keep the first parent.
        candidates = [(orig.revdiff(p1, rev), p1)]
        if p2 != nullid:
            candidates.append((orig.revdiff(p2, rev), p2))
        diff, base = min(candidates, key=lambda c: len(c[0]))
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        # Reuse the delta base already chosen by the on-disk storage.
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2962
2963
2963
2964
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of ``orig`` truncated to ``truncaterev``.

    The copy lives in a temporary directory that is removed on exit.
    Inline revlogs are rejected (index and data share one file there, so
    the copy/truncate logic below would not apply).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        # Newer revlogs take an extra constructor argument; forward it
        # when present so the copy behaves like the original.
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # Index entries are fixed-size; keep the first
            # ``truncaterev`` of them.
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3014
3015
3015
3016
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit list: use every engine that is available and can
        # actually compress (some are stubs raising NotImplementedError).
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Open a raw file handle on the revlog's backing store: the index
        # file when inline, the data file otherwise.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # One segment read per revision, cold caches, fresh descriptor
        # each time.
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # Same as doread, but reusing a single file descriptor.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # A single segment read spanning the whole revision range.
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # Read + decompress each revision's chunk individually.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # One compression benchmark per selected engine, reusing the chunks
    # captured by dochunkbatch above.
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3143
3144
3144
3145
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # With -c/-m there is no FILE argument: the positional value is
        # actually the revision.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice each read segment into the per-revision compressed chunks
        # it contains, using zero-copy views (util.buffer).
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # Inline revlogs interleave index entries with data.
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # Older versions kept the helper on the revlog module.
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the inputs each phase benchmark needs, so the timed
    # closures above measure only their own step.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3286
3287
3287
3288
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revisions set cache on revset execution. The volatile cache holds
    filtered and obsolescence related caches."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # Drop the filtered/obsolescence caches so every run pays
            # the cost of rebuilding them.
            repo.invalidatevolatilesets()
        if contexts:
            # Materialize a full changectx per matched revision.
            for ctx in repo.set(expr):
                pass
        else:
            # Iterate bare revision numbers only.
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3319
3320
3320
3321
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def _benchmark(compute, name):
        # Build a runner that recomputes the named volatile set from a
        # cold state on every invocation.
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, name)

        return run

    def _select(candidates):
        # Honor an explicit list of set names when one was given.
        selected = sorted(candidates)
        if names:
            selected = [n for n in selected if n in names]
        return selected

    # Obsolescence-related sets first, ...
    for name in _select(obsolete.cachefuncs):
        timer(_benchmark(obsolete.getrevs, name), title=name)

    # ... then the repoview filtering sets.
    for name in _select(repoview.filtertable):
        timer(_benchmark(repoview.filterrevs, name), title=name)
    fm.end()
3366
3367
3367
3368
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # Drop every cached branchmap so subsets are rebuilt too.
                view._branchcaches.clear()
            else:
                # Only invalidate this filter's entry; subset caches
                # stay warm.
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # Topologically order the filters so each one's subset is benchmarked
    # after it; the for/else detects a dependency cycle.
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap reads/writes so only in-memory
    # computation is measured; both patches are restored in the finally
    # block below.
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3457
3458
3458
3459
@command(
    b'perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:

    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # revisions present in the target set but not in the base set: these
    # are the revisions whose processing is actually being benchmarked
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # repoview filter functions: hide everything outside the chosen subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the two synthetic filters; the finally block below makes
        # sure they never leak into the global filtertable past this call
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found: build the base branchmap from
            # scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a pristine copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3567
3568
3568
3569
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix typo in user-visible help text: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With ``--list``, only print the on-disk branchmap cache files (name and
    size) and return without benchmarking anything.

    With ``--filter``, benchmark the branchmap of the given repoview filter;
    otherwise the unfiltered repository is used. If no branchmap is cached
    for the requested filter, fall back along the filter subset chain; abort
    when no cached branchmap can be found at all.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # on-disk branchmap caches are named 'branch2[-<filtername>]'
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3627
3628
3628
3629
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def countmarkers():
        # constructing the obsstore parses the on-disk markers; len()
        # forces the full load and yields the marker count
        return len(obsolete.obsstore(storevfs))

    timer(countmarkers)
    fm.end()
3638
3639
3639
3640
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark the util.lrucachedict implementation

    Runs a series of micro-benchmarks (init, gets, inserts, sets, mixed).
    When ``--costlimit`` is non-zero, the cost-aware variants of the
    benchmarks are run instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # measure pure construction cost of the cache object
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # one random value per cache slot, used as both key and value
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # NOTE: `costs` is defined below (in the set-mode section) but is
        # only read here at call time, so the late binding is safe
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # cost-limited caches may have evicted the key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    # op 0 == get, op 1 == set; the ratio is driven by --mixedgetfreq.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        # keys span size * 2 so roughly half the gets miss the cache
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    # each benchmark gets its own timer/formatter pair so results are
    # reported independently
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3794
3795
3795
3796
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # bind the method once so the loop measures write cost, not lookup
        emit = ui.writenoi18n
        for _ in range(100000):
            emit(b'Testing write performance\n')

    timer(bench)
    fm.end()
3810
3811
3811
3812
def uisetup(ui):
    """extension setup hook: adapt this extension to old Mercurial versions

    When running against a Mercurial that has ``cmdutil.openrevlog`` but not
    ``commands.debugrevlogopts``, wrap ``openrevlog`` so that the ``--dir``
    option fails with a clear abort instead of an obscure error.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # only abort when --dir was requested on a repo lacking dirlog
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3830
3831
3831
3832
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        with ui.makeprogress(topic, total=total) as progress:
            # bind the bound method once; the loop then measures the
            # increment cost itself
            step = progress.increment
            for _ in _xrange(total):
                step()

    timer(bench)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now