##// END OF EJS Templates
py3: use native string when comparing with a function's argspec...
Raphaël Gomès -
r46210:7d0e5405 default
parent child Browse files
Show More
@@ -1,3882 +1,3882 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
123 def identity(a):
123 def identity(a):
124 return a
124 return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
177 def safehasattr(thing, attr):
177 def safehasattr(thing, attr):
178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
179
179
180
180
181 setattr(util, 'safehasattr', safehasattr)
181 setattr(util, 'safehasattr', safehasattr)
182
182
183 # for "historical portability":
183 # for "historical portability":
184 # define util.timer forcibly, because util.timer has been available
184 # define util.timer forcibly, because util.timer has been available
185 # since ae5d60bb70c9
185 # since ae5d60bb70c9
186 if safehasattr(time, 'perf_counter'):
186 if safehasattr(time, 'perf_counter'):
187 util.timer = time.perf_counter
187 util.timer = time.perf_counter
188 elif os.name == b'nt':
188 elif os.name == b'nt':
189 util.timer = time.clock
189 util.timer = time.clock
190 else:
190 else:
191 util.timer = time.time
191 util.timer = time.time
192
192
193 # for "historical portability":
193 # for "historical portability":
194 # use locally defined empty option list, if formatteropts isn't
194 # use locally defined empty option list, if formatteropts isn't
195 # available, because commands.formatteropts has been available since
195 # available, because commands.formatteropts has been available since
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 # available since 2.2 (or ae5f92e154d3)
197 # available since 2.2 (or ae5f92e154d3)
198 formatteropts = getattr(
198 formatteropts = getattr(
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 )
200 )
201
201
202 # for "historical portability":
202 # for "historical portability":
203 # use locally defined option list, if debugrevlogopts isn't available,
203 # use locally defined option list, if debugrevlogopts isn't available,
204 # because commands.debugrevlogopts has been available since 3.7 (or
204 # because commands.debugrevlogopts has been available since 3.7 (or
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 # since 1.9 (or a79fea6b3e77).
206 # since 1.9 (or a79fea6b3e77).
207 revlogopts = getattr(
207 revlogopts = getattr(
208 cmdutil,
208 cmdutil,
209 "debugrevlogopts",
209 "debugrevlogopts",
210 getattr(
210 getattr(
211 commands,
211 commands,
212 "debugrevlogopts",
212 "debugrevlogopts",
213 [
213 [
214 (b'c', b'changelog', False, b'open changelog'),
214 (b'c', b'changelog', False, b'open changelog'),
215 (b'm', b'manifest', False, b'open manifest'),
215 (b'm', b'manifest', False, b'open manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
217 ],
217 ],
218 ),
218 ),
219 )
219 )
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
226 def parsealiases(cmd):
226 def parsealiases(cmd):
227 return cmd.split(b"|")
227 return cmd.split(b"|")
228
228
229
229
230 if safehasattr(registrar, 'command'):
230 if safehasattr(registrar, 'command'):
231 command = registrar.command(cmdtable)
231 command = registrar.command(cmdtable)
232 elif safehasattr(cmdutil, 'command'):
232 elif safehasattr(cmdutil, 'command'):
233 command = cmdutil.command(cmdtable)
233 command = cmdutil.command(cmdtable)
234 if b'norepo' not in getargspec(command).args:
234 if 'norepo' not in getargspec(command).args:
235 # for "historical portability":
235 # for "historical portability":
236 # wrap original cmdutil.command, because "norepo" option has
236 # wrap original cmdutil.command, because "norepo" option has
237 # been available since 3.1 (or 75a96326cecb)
237 # been available since 3.1 (or 75a96326cecb)
238 _command = command
238 _command = command
239
239
240 def command(name, options=(), synopsis=None, norepo=False):
240 def command(name, options=(), synopsis=None, norepo=False):
241 if norepo:
241 if norepo:
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
243 return _command(name, list(options), synopsis)
243 return _command(name, list(options), synopsis)
244
244
245
245
246 else:
246 else:
247 # for "historical portability":
247 # for "historical portability":
248 # define "@command" annotation locally, because cmdutil.command
248 # define "@command" annotation locally, because cmdutil.command
249 # has been available since 1.9 (or 2daa5179e73f)
249 # has been available since 1.9 (or 2daa5179e73f)
250 def command(name, options=(), synopsis=None, norepo=False):
250 def command(name, options=(), synopsis=None, norepo=False):
251 def decorator(func):
251 def decorator(func):
252 if synopsis:
252 if synopsis:
253 cmdtable[name] = func, list(options), synopsis
253 cmdtable[name] = func, list(options), synopsis
254 else:
254 else:
255 cmdtable[name] = func, list(options)
255 cmdtable[name] = func, list(options)
256 if norepo:
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return func
258 return func
259
259
260 return decorator
260 return decorator
261
261
262
262
263 try:
263 try:
264 import mercurial.registrar
264 import mercurial.registrar
265 import mercurial.configitems
265 import mercurial.configitems
266
266
267 configtable = {}
267 configtable = {}
268 configitem = mercurial.registrar.configitem(configtable)
268 configitem = mercurial.registrar.configitem(configtable)
269 configitem(
269 configitem(
270 b'perf',
270 b'perf',
271 b'presleep',
271 b'presleep',
272 default=mercurial.configitems.dynamicdefault,
272 default=mercurial.configitems.dynamicdefault,
273 experimental=True,
273 experimental=True,
274 )
274 )
275 configitem(
275 configitem(
276 b'perf',
276 b'perf',
277 b'stub',
277 b'stub',
278 default=mercurial.configitems.dynamicdefault,
278 default=mercurial.configitems.dynamicdefault,
279 experimental=True,
279 experimental=True,
280 )
280 )
281 configitem(
281 configitem(
282 b'perf',
282 b'perf',
283 b'parentscount',
283 b'parentscount',
284 default=mercurial.configitems.dynamicdefault,
284 default=mercurial.configitems.dynamicdefault,
285 experimental=True,
285 experimental=True,
286 )
286 )
287 configitem(
287 configitem(
288 b'perf',
288 b'perf',
289 b'all-timing',
289 b'all-timing',
290 default=mercurial.configitems.dynamicdefault,
290 default=mercurial.configitems.dynamicdefault,
291 experimental=True,
291 experimental=True,
292 )
292 )
293 configitem(
293 configitem(
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
295 )
295 )
296 configitem(
296 configitem(
297 b'perf',
297 b'perf',
298 b'profile-benchmark',
298 b'profile-benchmark',
299 default=mercurial.configitems.dynamicdefault,
299 default=mercurial.configitems.dynamicdefault,
300 )
300 )
301 configitem(
301 configitem(
302 b'perf',
302 b'perf',
303 b'run-limits',
303 b'run-limits',
304 default=mercurial.configitems.dynamicdefault,
304 default=mercurial.configitems.dynamicdefault,
305 experimental=True,
305 experimental=True,
306 )
306 )
307 except (ImportError, AttributeError):
307 except (ImportError, AttributeError):
308 pass
308 pass
309 except TypeError:
309 except TypeError:
310 # compatibility fix for a11fd395e83f
310 # compatibility fix for a11fd395e83f
311 # hg version: 5.2
311 # hg version: 5.2
312 configitem(
312 configitem(
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
314 )
314 )
315 configitem(
315 configitem(
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
317 )
317 )
318 configitem(
318 configitem(
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
320 )
320 )
321 configitem(
321 configitem(
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
323 )
323 )
324 configitem(
324 configitem(
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
326 )
326 )
327 configitem(
327 configitem(
328 b'perf',
328 b'perf',
329 b'profile-benchmark',
329 b'profile-benchmark',
330 default=mercurial.configitems.dynamicdefault,
330 default=mercurial.configitems.dynamicdefault,
331 )
331 )
332 configitem(
332 configitem(
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
334 )
334 )
335
335
336
336
337 def getlen(ui):
337 def getlen(ui):
338 if ui.configbool(b"perf", b"stub", False):
338 if ui.configbool(b"perf", b"stub", False):
339 return lambda x: 1
339 return lambda x: 1
340 return len
340 return len
341
341
342
342
343 class noop(object):
343 class noop(object):
344 """dummy context manager"""
344 """dummy context manager"""
345
345
346 def __enter__(self):
346 def __enter__(self):
347 pass
347 pass
348
348
349 def __exit__(self, *args):
349 def __exit__(self, *args):
350 pass
350 pass
351
351
352
352
353 NOOPCTX = noop()
353 NOOPCTX = noop()
354
354
355
355
356 def gettimer(ui, opts=None):
356 def gettimer(ui, opts=None):
357 """return a timer function and formatter: (timer, formatter)
357 """return a timer function and formatter: (timer, formatter)
358
358
359 This function exists to gather the creation of formatter in a single
359 This function exists to gather the creation of formatter in a single
360 place instead of duplicating it in all performance commands."""
360 place instead of duplicating it in all performance commands."""
361
361
362 # enforce an idle period before execution to counteract power management
362 # enforce an idle period before execution to counteract power management
363 # experimental config: perf.presleep
363 # experimental config: perf.presleep
364 time.sleep(getint(ui, b"perf", b"presleep", 1))
364 time.sleep(getint(ui, b"perf", b"presleep", 1))
365
365
366 if opts is None:
366 if opts is None:
367 opts = {}
367 opts = {}
368 # redirect all to stderr unless buffer api is in use
368 # redirect all to stderr unless buffer api is in use
369 if not ui._buffers:
369 if not ui._buffers:
370 ui = ui.copy()
370 ui = ui.copy()
371 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
371 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
372 if uifout:
372 if uifout:
373 # for "historical portability":
373 # for "historical portability":
374 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
374 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
375 uifout.set(ui.ferr)
375 uifout.set(ui.ferr)
376
376
377 # get a formatter
377 # get a formatter
378 uiformatter = getattr(ui, 'formatter', None)
378 uiformatter = getattr(ui, 'formatter', None)
379 if uiformatter:
379 if uiformatter:
380 fm = uiformatter(b'perf', opts)
380 fm = uiformatter(b'perf', opts)
381 else:
381 else:
382 # for "historical portability":
382 # for "historical portability":
383 # define formatter locally, because ui.formatter has been
383 # define formatter locally, because ui.formatter has been
384 # available since 2.2 (or ae5f92e154d3)
384 # available since 2.2 (or ae5f92e154d3)
385 from mercurial import node
385 from mercurial import node
386
386
387 class defaultformatter(object):
387 class defaultformatter(object):
388 """Minimized composition of baseformatter and plainformatter
388 """Minimized composition of baseformatter and plainformatter
389 """
389 """
390
390
391 def __init__(self, ui, topic, opts):
391 def __init__(self, ui, topic, opts):
392 self._ui = ui
392 self._ui = ui
393 if ui.debugflag:
393 if ui.debugflag:
394 self.hexfunc = node.hex
394 self.hexfunc = node.hex
395 else:
395 else:
396 self.hexfunc = node.short
396 self.hexfunc = node.short
397
397
398 def __nonzero__(self):
398 def __nonzero__(self):
399 return False
399 return False
400
400
401 __bool__ = __nonzero__
401 __bool__ = __nonzero__
402
402
403 def startitem(self):
403 def startitem(self):
404 pass
404 pass
405
405
406 def data(self, **data):
406 def data(self, **data):
407 pass
407 pass
408
408
409 def write(self, fields, deftext, *fielddata, **opts):
409 def write(self, fields, deftext, *fielddata, **opts):
410 self._ui.write(deftext % fielddata, **opts)
410 self._ui.write(deftext % fielddata, **opts)
411
411
412 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
412 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
413 if cond:
413 if cond:
414 self._ui.write(deftext % fielddata, **opts)
414 self._ui.write(deftext % fielddata, **opts)
415
415
416 def plain(self, text, **opts):
416 def plain(self, text, **opts):
417 self._ui.write(text, **opts)
417 self._ui.write(text, **opts)
418
418
419 def end(self):
419 def end(self):
420 pass
420 pass
421
421
422 fm = defaultformatter(ui, b'perf', opts)
422 fm = defaultformatter(ui, b'perf', opts)
423
423
424 # stub function, runs code only once instead of in a loop
424 # stub function, runs code only once instead of in a loop
425 # experimental config: perf.stub
425 # experimental config: perf.stub
426 if ui.configbool(b"perf", b"stub", False):
426 if ui.configbool(b"perf", b"stub", False):
427 return functools.partial(stub_timer, fm), fm
427 return functools.partial(stub_timer, fm), fm
428
428
429 # experimental config: perf.all-timing
429 # experimental config: perf.all-timing
430 displayall = ui.configbool(b"perf", b"all-timing", False)
430 displayall = ui.configbool(b"perf", b"all-timing", False)
431
431
432 # experimental config: perf.run-limits
432 # experimental config: perf.run-limits
433 limitspec = ui.configlist(b"perf", b"run-limits", [])
433 limitspec = ui.configlist(b"perf", b"run-limits", [])
434 limits = []
434 limits = []
435 for item in limitspec:
435 for item in limitspec:
436 parts = item.split(b'-', 1)
436 parts = item.split(b'-', 1)
437 if len(parts) < 2:
437 if len(parts) < 2:
438 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
438 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
439 continue
439 continue
440 try:
440 try:
441 time_limit = float(_sysstr(parts[0]))
441 time_limit = float(_sysstr(parts[0]))
442 except ValueError as e:
442 except ValueError as e:
443 ui.warn(
443 ui.warn(
444 (
444 (
445 b'malformatted run limit entry, %s: %s\n'
445 b'malformatted run limit entry, %s: %s\n'
446 % (_bytestr(e), item)
446 % (_bytestr(e), item)
447 )
447 )
448 )
448 )
449 continue
449 continue
450 try:
450 try:
451 run_limit = int(_sysstr(parts[1]))
451 run_limit = int(_sysstr(parts[1]))
452 except ValueError as e:
452 except ValueError as e:
453 ui.warn(
453 ui.warn(
454 (
454 (
455 b'malformatted run limit entry, %s: %s\n'
455 b'malformatted run limit entry, %s: %s\n'
456 % (_bytestr(e), item)
456 % (_bytestr(e), item)
457 )
457 )
458 )
458 )
459 continue
459 continue
460 limits.append((time_limit, run_limit))
460 limits.append((time_limit, run_limit))
461 if not limits:
461 if not limits:
462 limits = DEFAULTLIMITS
462 limits = DEFAULTLIMITS
463
463
464 profiler = None
464 profiler = None
465 if profiling is not None:
465 if profiling is not None:
466 if ui.configbool(b"perf", b"profile-benchmark", False):
466 if ui.configbool(b"perf", b"profile-benchmark", False):
467 profiler = profiling.profile(ui)
467 profiler = profiling.profile(ui)
468
468
469 prerun = getint(ui, b"perf", b"pre-run", 0)
469 prerun = getint(ui, b"perf", b"pre-run", 0)
470 t = functools.partial(
470 t = functools.partial(
471 _timer,
471 _timer,
472 fm,
472 fm,
473 displayall=displayall,
473 displayall=displayall,
474 limits=limits,
474 limits=limits,
475 prerun=prerun,
475 prerun=prerun,
476 profiler=profiler,
476 profiler=profiler,
477 )
477 )
478 return t, fm
478 return t, fm
479
479
480
480
481 def stub_timer(fm, func, setup=None, title=None):
481 def stub_timer(fm, func, setup=None, title=None):
482 if setup is not None:
482 if setup is not None:
483 setup()
483 setup()
484 func()
484 func()
485
485
486
486
487 @contextlib.contextmanager
487 @contextlib.contextmanager
488 def timeone():
488 def timeone():
489 r = []
489 r = []
490 ostart = os.times()
490 ostart = os.times()
491 cstart = util.timer()
491 cstart = util.timer()
492 yield r
492 yield r
493 cstop = util.timer()
493 cstop = util.timer()
494 ostop = os.times()
494 ostop = os.times()
495 a, b = ostart, ostop
495 a, b = ostart, ostop
496 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
496 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
497
497
498
498
499 # list of stop condition (elapsed time, minimal run count)
499 # list of stop condition (elapsed time, minimal run count)
500 DEFAULTLIMITS = (
500 DEFAULTLIMITS = (
501 (3.0, 100),
501 (3.0, 100),
502 (10.0, 3),
502 (10.0, 3),
503 )
503 )
504
504
505
505
506 def _timer(
506 def _timer(
507 fm,
507 fm,
508 func,
508 func,
509 setup=None,
509 setup=None,
510 title=None,
510 title=None,
511 displayall=False,
511 displayall=False,
512 limits=DEFAULTLIMITS,
512 limits=DEFAULTLIMITS,
513 prerun=0,
513 prerun=0,
514 profiler=None,
514 profiler=None,
515 ):
515 ):
516 gc.collect()
516 gc.collect()
517 results = []
517 results = []
518 begin = util.timer()
518 begin = util.timer()
519 count = 0
519 count = 0
520 if profiler is None:
520 if profiler is None:
521 profiler = NOOPCTX
521 profiler = NOOPCTX
522 for i in range(prerun):
522 for i in range(prerun):
523 if setup is not None:
523 if setup is not None:
524 setup()
524 setup()
525 func()
525 func()
526 keepgoing = True
526 keepgoing = True
527 while keepgoing:
527 while keepgoing:
528 if setup is not None:
528 if setup is not None:
529 setup()
529 setup()
530 with profiler:
530 with profiler:
531 with timeone() as item:
531 with timeone() as item:
532 r = func()
532 r = func()
533 profiler = NOOPCTX
533 profiler = NOOPCTX
534 count += 1
534 count += 1
535 results.append(item[0])
535 results.append(item[0])
536 cstop = util.timer()
536 cstop = util.timer()
537 # Look for a stop condition.
537 # Look for a stop condition.
538 elapsed = cstop - begin
538 elapsed = cstop - begin
539 for t, mincount in limits:
539 for t, mincount in limits:
540 if elapsed >= t and count >= mincount:
540 if elapsed >= t and count >= mincount:
541 keepgoing = False
541 keepgoing = False
542 break
542 break
543
543
544 formatone(fm, results, title=title, result=r, displayall=displayall)
544 formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
547 def formatone(fm, timings, title=None, result=None, displayall=False):
547 def formatone(fm, timings, title=None, result=None, displayall=False):
548
548
549 count = len(timings)
549 count = len(timings)
550
550
551 fm.startitem()
551 fm.startitem()
552
552
553 if title:
553 if title:
554 fm.write(b'title', b'! %s\n', title)
554 fm.write(b'title', b'! %s\n', title)
555 if result:
555 if result:
556 fm.write(b'result', b'! result: %s\n', result)
556 fm.write(b'result', b'! result: %s\n', result)
557
557
558 def display(role, entry):
558 def display(role, entry):
559 prefix = b''
559 prefix = b''
560 if role != b'best':
560 if role != b'best':
561 prefix = b'%s.' % role
561 prefix = b'%s.' % role
562 fm.plain(b'!')
562 fm.plain(b'!')
563 fm.write(prefix + b'wall', b' wall %f', entry[0])
563 fm.write(prefix + b'wall', b' wall %f', entry[0])
564 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
564 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
565 fm.write(prefix + b'user', b' user %f', entry[1])
565 fm.write(prefix + b'user', b' user %f', entry[1])
566 fm.write(prefix + b'sys', b' sys %f', entry[2])
566 fm.write(prefix + b'sys', b' sys %f', entry[2])
567 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
567 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
568 fm.plain(b'\n')
568 fm.plain(b'\n')
569
569
570 timings.sort()
570 timings.sort()
571 min_val = timings[0]
571 min_val = timings[0]
572 display(b'best', min_val)
572 display(b'best', min_val)
573 if displayall:
573 if displayall:
574 max_val = timings[-1]
574 max_val = timings[-1]
575 display(b'max', max_val)
575 display(b'max', max_val)
576 avg = tuple([sum(x) / count for x in zip(*timings)])
576 avg = tuple([sum(x) / count for x in zip(*timings)])
577 display(b'avg', avg)
577 display(b'avg', avg)
578 median = timings[len(timings) // 2]
578 median = timings[len(timings) // 2]
579 display(b'median', median)
579 display(b'median', median)
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
585 def getint(ui, section, name, default):
585 def getint(ui, section, name, default):
586 # for "historical portability":
586 # for "historical portability":
587 # ui.configint has been available since 1.9 (or fa2b596db182)
587 # ui.configint has been available since 1.9 (or fa2b596db182)
588 v = ui.config(section, name, None)
588 v = ui.config(section, name, None)
589 if v is None:
589 if v is None:
590 return default
590 return default
591 try:
591 try:
592 return int(v)
592 return int(v)
593 except ValueError:
593 except ValueError:
594 raise error.ConfigError(
594 raise error.ConfigError(
595 b"%s.%s is not an integer ('%s')" % (section, name, v)
595 b"%s.%s is not an integer ('%s')" % (section, name, v)
596 )
596 )
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    # remember the current value so restore() can undo any set()
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
def getbranchmapsubsettable():
    """Locate the branchmap 'subsettable' wherever this Mercurial keeps it.

    For "historical portability", subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but the subsettable
    # attribute doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the store opener as 'sopener' instead.
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older repos expose it as 'opener' instead.
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # setattr(repo, '_tagscache', None) would be the wrong way to
        # clear the cache here: existing code paths expect _tagscache
        # to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    tagsattr = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if tagsattr:  # since 1.4 (or 5614a628d173)
        return lambda: tagsattr.set(None)

    tagscacheattr = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if tagscacheattr:  # since 0.6 (or d7df759d0e97)
        return lambda: tagscacheattr.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
def clearfilecache(obj, attrname):
    """Drop the @filecache'd property 'attrname' from 'obj'.

    Operates on the unfiltered object when 'obj' supports unfiltering
    (i.e. is a repoview), since that is where the cache actually lives.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # also forget the on-disk state tracked for this property
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    """Drop any cached changelog so the next access reloads it from disk."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # a filtered view keeps its own handle on the changelog; drop it too
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # Benchmark a full dirstate walk of the working directory, counting
    # every file the matcher selects (unknown files included).
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def run():
        walker = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walker))

    timer(run)
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # Benchmark annotating file 'f' as of the working directory parent.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def run():
        return len(fctx.annotate(True))

    timer(run)
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # measure the raw dirstate.status() call, bypassing repo.status()
        dirstate = repo.dirstate
        matcher = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def run_dirstate():
            st = dirstate.status(
                matcher,
                subrepos=[],
                ignored=False,
                clean=False,
                unknown=unknown,
            )
            sum(bool(x) for x in st)

        timer(run_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
797
797
798
798
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # Benchmark a dry-run addremove over the whole working directory.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True  # silence the per-file add/remove chatter
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # Probe for the 'uipathfn' argument added to scmutil.addremove in
        # hg 5.0. getargspec().args holds native strings on Python 3, so
        # the comparison must use a native str, not bytes (b'uipathfn'
        # would never match and the new API would silently not be used).
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
816
816
817
817
def clearcaches(cl):
    """Drop lookup caches of a changelog/revlog, across internal API changes."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # hg 5.2 and earlier kept an explicit node->rev cache dict
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
828
828
829
829
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    # drop caches before each run so every iteration recomputes from scratch
    def setup():
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
845
845
846
846
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    # Benchmark computing the repository's tags from a cold tags cache.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            # also force changelog and manifest to be reloaded from disk
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
869
869
870
870
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # Benchmark walking all ancestors of every changelog head.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
883
883
884
884
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # Benchmark membership tests of the given revset against the lazy
    # ancestor set of all heads.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(run)
    fm.end()
899
899
900
900
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # Convert kwargs to bytes keys like every other perf command does:
    # on Python 3 the kwargs arrive str-keyed, and gettimer()/hg.peer()
    # look options up with bytes keys, so without this conversion the
    # formatter options are silently ignored.
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    # re-open the peer before each run so discovery starts cold
    def setup():
        repos[1] = hg.peer(ui, opts, path)

    def run():
        setdiscovery.findcommonheads(ui, *repos)

    timer(run, setup=setup)
    fm.end()
917
917
918
918
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    # drop the cached bookmark store (and optionally the revlogs) so each
    # run re-parses the bookmarks file from disk
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
941
941
942
942
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # wrap a bundle-consuming callable so each timed run re-opens and
    # re-parses the bundle from disk
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # read the parsed bundle stream in fixed-size chunks
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # baseline: raw file reads with no bundle parsing at all
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    # read each bundle2 part's payload in fixed-size chunks
    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines always run, regardless of bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the applicable benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1067
1067
1068
1068
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def run():
        _state, chunks = bundler._generatechangelog(cl, nodes)
        # drain the generator; producing the chunks is the work we measure
        for _chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(run)

    fm.end()
1104
1104
1105
1105
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # Benchmark rebuilding the dirstate's directory map via hasdir().
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded before timing

    def run():
        dirstate.hasdir(b'a')
        # drop the dirs cache so the next run rebuilds it
        del dirstate._map._dirs

    timer(run)
    fm.end()
1119
1119
1120
1120
@command(
    b'perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # force the dirstate to be loaded up front

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def run():
            for _path in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles += [path[::-1] for path in allfiles]

        def run():
            for path in allfiles:
                path in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def run():
            b"a" in repo.dirstate

    timer(run, setup=setup)
    fm.end()
1183
1183
1184
1184
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")  # force the dirstate to be loaded up front

    # drop the dirs cache before every run so hasdir() rebuilds it
    def setup():
        del repo.dirstate._map._dirs

    def run():
        repo.dirstate.hasdir(b"a")

    timer(run, setup=setup)
    fm.end()
1201
1201
1202
1202
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything up once; the timed runs rebuild only the foldmap
    dirstate._map.filefoldmap.get(b'a')

    def reset():
        del dirstate._map.filefoldmap

    def run():
        dirstate._map.filefoldmap.get(b'a')

    timer(run, setup=reset)
    fm.end()
1222
1222
1223
1223
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the caches once before timing
    dirstate._map.dirfoldmap.get(b'a')

    def reset():
        # dirfoldmap is derived from `_dirs`, so drop both caches
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def run():
        dirstate._map.dirfoldmap.get(b'a')

    timer(run, setup=reset)
    fm.end()
1244
1244
1245
1245
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmap the time it take to write a dirstate on disk
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # force the dirstate to be loaded before any timing happens
    b"a" in ds

    def mark_dirty():
        # a clean dirstate short-circuits write(); flag it dirty each run
        ds._dirty = True

    def run():
        ds.write(repo.currenttransaction())

    timer(run, setup=mark_dirty)
    fm.end()
1263
1263
1264
1264
def _getmergerevs(repo, opts):
    """parse command arguments and return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        wctx = repo[scmutil.revsingle(repo, opts[b'from'])]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        ancestor = repo[scmutil.revsingle(repo, opts[b'base'])]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1286
1286
1287
1287
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote=True keeps interactive prompts out of the timed
        # section of the benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1319
1319
1320
1320
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1343
1343
1344
1344
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(ctx1, ctx2))
    fm.end()
1358
1358
1359
1359
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # with --full also pay the cost of re-reading the phase file
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1382
1382
1383
1383
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # fall back to the nodemap API of older Mercurial versions
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # use items() rather than iteritems(): the latter does not exist on
    # Python 3 dicts and made this command crash there
    for nhex, phase in remotephases.items():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1442
1442
1443
1443
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` names a changeset; take its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        mfnode = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full hexadecimal node: decode it directly
            mfnode = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    mfnode = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial exposed the manifest revlog directly
                    mfnode = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def run():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[mfnode].read()

    timer(run)
    fm.end()
1487
1487
1488
1488
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1501
1501
1502
1502
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def reset():
        # drop both the in-memory dirstate and its cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        dirstate._ignore

    timer(load, setup=reset, title=b"load")
    fm.end()
1519
1519
1520
1520
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs turned all option keys into bytes, so the key must be
        # b'rev' (the native-str 'rev' raised KeyError on Python 3); Abort
        # messages are bytes everywhere else in this file as well
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1583
1583
1584
1584
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # _byteskwargs turned all option keys into bytes, so a bytes key is
    # required here (the native-str 'clear_caches' raised KeyError on py3)
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # Abort messages are bytes everywhere else in this file
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1655
1655
1656
1656
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        # re-launch the current hg binary with a trivial command and an
        # empty HGRCPATH, timing the full interpreter + dispatch startup
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
1673
1673
1674
1674
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount bounds how many commits we walk
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1700
1700
1701
1701
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    # time how long it takes to list the files touched by one changeset
    timer(lambda: len(repo[rev].files()))
    fm.end()
1713
1713
1714
1714
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # entry [3] of a raw changelog revision is its file list
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
1727
1727
1728
1728
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
1735
1735
1736
1736
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the generated edit sequence must be reproducible, and the
    # randint calls below must stay in this exact order
    random.seed(0)
    randint = random.randint
    currentlines = 0
    hunks = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        hunks.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in hunks:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
1774
1774
1775
1775
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the function once so the attribute lookup is not timed
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1783
1783
1784
1784
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog revlog directly, bypassing the repo caches
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def run():
        cl.rev(n)
        # drop the revlog caches so every iteration starts cold
        clearcaches(cl)

    timer(run)
    fm.end()
1801
1801
1802
1802
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running the log command over the given revisions"""
    opts = _byteskwargs(opts)
    rev = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    # capture (and discard) the potentially large log output
    ui.pushbuffer()

    def runlog():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(runlog)
    ui.popbuffer()
    fm.end()
1820
1820
1821
1821
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        changelog = repo.changelog
        for rev in changelog.revs(start=(len(repo) - 1), stop=-1):
            # reading the branch forces the changelog entry itself to be
            # loaded, not just the index
            repo[rev].branch()

    timer(moonwalk)
    fm.end()
1838
1838
1839
1839
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a throw-away ui so output cost does not skew the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev') or [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    if testedtemplate is None:
        # a representative one-line-per-changeset template
        testedtemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1879
1879
1880
1880
def _displaystats(ui, opts, entries, data):
    """display percentile statistics collected by the perfhelper-* commands

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each key
    to a list of tuples whose first item is the measured quantity.  For each
    key, the values are sorted and min/percentiles/max are printed.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # skip empty series: percentile indexing below would raise IndexError
        if not values:
            fm.plain('### %s (no data)\n' % title)
            continue
        # percentile indices must be computed against the number of collected
        # values, not the number of data categories (len(data) was a bug that
        # collapsed every percentile to values[0])
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1925
1925
1926
1926
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, printf-style format) pairs; timing/rename columns are
    # filtered out below when --timing is not requested
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # accumulators for the optional --stats summary; each list holds
    # (value, identifying-hashes...) tuples consumed by _displaystats
    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant for merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # NOTE(review): the dict is built with bytes keys but later read
            # and extended with native-str keys (data['time'], fm.data(**data))
            # -- looks py2-only; confirm against the _byteskwargs conventions.
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                # time each parent's pathcopies pass separately
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2108
2108
2109
2109
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the output has two extra columns (renames, time) when --timing is on
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # accumulators for the optional --stats summary (see _displaystats)
    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merges produce (base, parent) pairs interesting for copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no files to trace between this pair; skip it
                    continue
                # NOTE(review): bytes keys here but native-str key access
                # below (data['nbrevs'], fm.data(**data)) -- looks py2-only;
                # confirm before relying on this under py3.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2232
2232
2233
2233
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark creating a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def buildauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(buildauditor)
    fm.end()
2240
2240
2241
2241
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    timer(lambda: store.fncache._load())
    fm.end()
2253
2253
2254
2254
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # take the store lock for the whole benchmark: we rewrite real repo data
    lock = repo.lock()
    # populate the cache before timing so only the write path is measured
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    # back up the current fncache so a rollback restores it
    tr.addbackup(b'fncache')

    def d():
        # force a rewrite even though the content did not change
        s.fncache._dirty = True
        s.fncache.write(tr)

    # NOTE(review): if timer() raises, the transaction and lock leak;
    # a try/finally around close()/release() would be safer -- confirm.
    timer(d)
    tr.close()
    lock.release()
    fm.end()
2273
2273
2274
2274
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark path-encoding every entry of the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load outside the timed section so only encoding is measured
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2288
2288
2289
2289
def _bdiffworker(q, blocks, xdiff, ready, done):
    """thread worker for perfbdiff: pull text pairs from ``q`` and diff them

    ``q`` delivers ``(text1, text2)`` pairs, with ``None`` acting as a
    batch terminator.  ``blocks``/``xdiff`` select which diff routine is
    exercised.  After draining a batch the worker parks on the ``ready``
    condition until woken for the next run; ``done`` tells it to exit.
    """
    while not done.is_set():
        pair = q.get()
        # inner loop: drain the current batch until the None sentinel
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        # wait until the coordinator wakes us for the next batch
        with ready:
            ready.wait()
2305
2305
2306
2306
def _manifestrevision(repo, mnode):
    """return the raw revision data for manifest node ``mnode``"""
    manifestlog = repo.manifestlog

    # modern hg exposes getstorage(); older versions kept a bare _revlog
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
2316
2316
2317
2317
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    # --xdiff only selects which block-diff algorithm --blocks uses.
    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # In changelog/manifest mode the single positional argument is the
    # revision, not a file path.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Collect all (old text, new text) pairs up front so only the diffing
    # itself is timed.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # Default mode: diff each revision against its delta parent.
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # Single-threaded: diff every pair inline.
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # Multi-threaded: start the worker threads now, before timing,
        # so thread startup cost is excluded.  One None sentinel per
        # worker parks them on the `ready` condition until d() runs.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # Feed all pairs, then one sentinel per worker, wake everyone
            # up and wait for the queue to drain.
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Tell workers to exit and unblock any that are still waiting.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2432
2432
2433
2433
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies operating on the changelog.
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Gather all (left, right) text pairs up front so that only the
    # diffing itself is timed.
    textpairs = []
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if not opts[b'alldata']:
            # Default mode: diff each revision against its delta parent.
            textpairs.append(
                (r.revision(r.deltaparent(rev)), r.revision(rev))
            )
            continue

        # Manifest text versus each parent's manifest text.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pman = _manifestrevision(repo, pctx.manifestnode())
            textpairs.append((pman, mtext))

        # Old/new revision of every file touched by this changeset,
        # discovered by walking the manifest delta against p1.
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for filename, change in pman.diff(man).items():
            flog = repo.file(filename)
            oldtext = flog.revision(change[0][0] or -1)
            newtext = flog.revision(change[1][0] or -1)
            textpairs.append((oldtext, newtext))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2512
2512
2513
2513
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Map each single-letter diff flag to the diff option it enables.
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # Keyword arguments for this combination of whitespace flags.
        opts = {options[flag]: b'1' for flag in diffopt}

        def d():
            # Capture the diff output so only timing is reported.
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        if diffopt:
            title = b'diffopts: %s' % (b'-' + diffopt)
        else:
            title = b'diffopts: %s' % b'none'
        timer(d, title=title)
    fm.end()
2537
2537
2538
2538
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # First 4 bytes of the index: flags in the high 16 bits, revlog
    # format version in the low 16 bits.
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # Sample nodes at fixed fractions of the revlog for lookup benches.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # Measure cost of instantiating a revlog object.
        revlog.revlog(opener, indexfile)

    def read():
        # Measure raw index file read.
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # Measure index parsing (no lookups).
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        # Parse a fresh index, then fetch one entry.
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        # Parse once, then fetch every entry ``count`` times (the
        # repeated pass measures caching effects).
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # Node -> rev lookup; the index may expose ``rev`` directly or,
        # for the C implementation, a ``nodemap`` mapping.
        index = revlogio.parseindex(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                revlogio.parseindex(data, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        # Bulk node -> rev lookups, repeated ``count`` times to measure
        # caching impact; missing nodes are expected and ignored.
        index = revlogio.parseindex(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                revlogio.parseindex(data, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    # (benchmark callable, display title) pairs, run in order below.
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2672
2672
2673
2673
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # A negative start revision counts back from the tip.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # Walk from tip down to (but excluding) startrev - 1.
            first, last, step = rllen - 1, startrev - 1, -step
        else:
            first, last = startrev, rllen

        for revnum in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(revnum))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2722
2722
2723
2723
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count back from the tip.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed typo in user-facing message (was "invalide run count")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] is (rev, [timing from each pass]) for the i-th revision.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the 50th percentile was previously computed with a
        # 70 multiplier, reporting the wrong entry under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2865
2865
2866
2866
class _faketr(object):
    """Minimal transaction stand-in for addrawrevision() benchmarks.

    Journaling is irrelevant when timing writes, so ``add`` is a no-op.
    """

    def add(s, x, y, z=None):
        return None
2870
2870
2871
2871
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Copy revisions [startrev, stoprev] of ``orig`` into a temporary
    revlog, timing each ``addrawrevision`` call individually.

    ``source`` selects how the revision data is fed (full text, delta to
    a parent, or stored delta — see perfrevlogwrite).  ``runidx``, if
    given, is shown in the progress topic to distinguish passes.

    Returns a list of (rev, timing) tuples, one per copied revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # Prepare the data outside the timed section...
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # ... so only the actual write is measured.
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2921
2921
2922
2922
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for replaying `rev` of `orig`
    through ``addrawrevision``.

    `source` selects how the payload is provided: as a fulltext
    (b'full'), as a delta against a chosen parent (b'parent-1',
    b'parent-2', b'parent-smallest'), or against the delta parent the
    storage actually used (b'storage').
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # compute both parent deltas and keep the shorter of the two
        delta = orig.revdiff(p1, rev)
        base = p1
        if p2 != nullid:
            other = orig.revdiff(p2, rev)
            if len(other) < len(delta):
                base = p2
                delta = other
        cachedelta = (orig.rev(base), delta)
    elif source == b'storage':
        storagebase = orig.deltaparent(rev)
        cachedelta = (storagebase, orig.revdiff(orig.node(storagebase), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2963
2963
2964
2964
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable temporary copy of revlog `orig`
    truncated to `truncaterev`, so revisions >= truncaterev can be re-added.

    The copy lives in a throwaway directory that is removed on exit.
    Inline revlogs are rejected because index and data share one file,
    which the copy/truncate logic below does not handle.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward `upperboundcomp` only when this Mercurial version has it
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # open in append mode so truncate() can shrink without clobbering,
        # then cut the index at the fixed-size entry boundary and the data
        # file at the byte offset of the first removed revision
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # best-effort cleanup (ignore_errors=True): never mask the body's
        # exception with a removal failure
        shutil.rmtree(tmpdir, True)
3015
3015
3016
3016
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        # explicit list: every named engine must exist
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default: every available engine that supports revlog compression
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on whichever file holds the chunk data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    # Each closure below benchmarks one access pattern; caches are cleared
    # up front so every run starts cold.

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # single-element list so dochunkbatch can hand the decompressed chunks
    # to the later docompress benchmarks
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # one compression benchmark per selected engine; these rely on
    # chunks[0] having been filled by the 'chunk batch' run above
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3144
3144
3145
3145
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each revision's compressed chunk out of the segment buffers;
        # bind hot lookups to locals since this runs per-revision
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # Each closure below isolates one phase; `cache` keeps caches warm
    # between runs instead of clearing them.

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        # NOTE: iterates `slicedchain` from the enclosing scope, not the
        # `chain` argument
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across Mercurial versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute every intermediate product once so each benchmark can
    # start from the output of the previous phase
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3287
3287
3288
3288
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clean option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def run():
        # optionally drop the volatile-set caches so each call pays the
        # cost of rebuilding them
        if clear:
            repo.invalidatevolatilesets()
        # exhaust the revset, either as changectx objects or as bare revs
        if contexts:
            revisions = repo.set(expr)
        else:
            revisions = repo.revs(expr)
        for _ in revisions:
            pass

    timer(run)
    fm.end()
3320
3320
3321
3321
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # use the unfiltered repo so filter computation itself is what we time
    repo = repo.unfiltered()

    def getobs(name):
        # build a benchmark closure for one obsolescence set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a benchmark closure for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
3367
3367
3368
3368
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        # newer versions keep per-filter caches behind `_per_filter`
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: time a from-scratch build
                view._branchcaches.clear()
            else:
                # drop only this filter's entry: time an incremental update
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    # (topological order: repeatedly pick a filter whose subset is already
    # scheduled or outside the remaining set)
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads and writes so only the in-memory
    # computation is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3458
3458
3459
3459
@command(
    b'perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    # imported locally so the rest of the extension keeps working on
    # Mercurial versions where these modules may differ
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will add on top of <base>
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    # revision sets backing the two temporary repoview filters below
    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        # repoview filter hiding everything but the <base> revisions
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        # repoview filter hiding everything but the <target> revisions
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the temporary filters so repo.filtered() can resolve them
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                # extend the candidate up to the full <base> set before use
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the <base> branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the temporary filters, even on error
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3568
3568
3569
3569
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fixed help-text typo: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With ``--list``, only print the on-disk branchmap cache files and their
    sizes instead of benchmarking anything.
    """
    # note: `filter` and `list` shadow builtins, but the parameter names are
    # dictated by the command option names and cannot be renamed.
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # on-disk branchmap caches are files named branch2[-<filter>]
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until a cached branchmap is found
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3628
3628
3629
3629
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def loadmarkers():
        # instantiate the obsstore and count its markers
        return len(obsolete.obsstore(storevfs))

    timer(loadmarkers)
    fm.end()
3639
3639
3640
3640
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark `util.lrucachedict` under several usage patterns

    Measures construction, pure lookups, pure insertions and a random mix of
    both, either with or without a total cost limit on the cache.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object itself
        for _ in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # random payloads used to pre-fill the cache for the "gets" benchmarks
    values = [random.randint(0, _maxint) for _ in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = [random.choice(values) for _ in _xrange(gets)]

    def dogets():
        cache = util.lrucachedict(size)
        for v in values:
            cache[v] = v
        for key in getseq:
            value = cache[key]
            value  # silence pyflakes warning

    def dogetscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            cache.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = cache[key]
                value  # silence pyflakes warning
            except KeyError:
                # the cost limit may have evicted this key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for _ in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache.insert(v, v)

    def doinsertscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            cache.insert(v, v, cost=costs[i])

    def dosets():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for _ in _xrange(mixed):
        r = random.randint(0, 100)
        op = 0 if r < mixedgetfreq else 1
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        cache = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache[v] = v

    def domixedcost():
        cache = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache.insert(v, v, cost=cost)

    benches = [(doinit, b'init')]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3795
3795
3796
3796
@command(
    b'perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)
    """
    opts = _byteskwargs(opts)

    # resolve the ui method under test (write, write_err, ...)
    methodname = _sysstr(opts[b'write_method'])
    write = getattr(ui, methodname)
    item = opts[b'item']
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # pre-build the full line so only the write call itself is measured
        line = item * nitems + b'\n'

    def benchmark():
        for i in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for j in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
3839
3839
3840
3840
def uisetup(ui):
    # for "historical portability":
    # only act when cmdutil.openrevlog exists but commands.debugrevlogopts
    # does not, i.e. Mercurial 1.9 (or a79fea6b3e77) up to 3.7 (or
    # 5606f7d0d063).  On those versions the '--dir' option must be rejected
    # explicitly when unsupported, because it has only been available since
    # 3.5 (or 49c583ca48c4).
    if not util.safehasattr(cmdutil, b'openrevlog'):
        return
    if util.safehasattr(commands, b'debugrevlogopts'):
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        # refuse '--dir' when the repository offers no dirlog support
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3859
3859
3860
3860
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive a complete progress bar from 0 to `total`, one step at a time
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now