##// END OF EJS Templates
perf: make `perfnodemap` use the new `index.get_rev` api if available...
marmoute -
r43971:22cd0064 default
parent child Browse files
Show More
@@ -1,3840 +1,3844
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return *a* unchanged.

    Used as a no-op stand-in for pycompat helpers (``byteskwargs``,
    ``fsencode``) when running against a Mercurial too old to provide them.
    """
    return a
125
125
126
126
# for "historical portability":
# bind py2/py3 helpers from mercurial.pycompat when available; otherwise
# fall back to stdlib equivalents (py2-only: xrange/maxint fallbacks).
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # no (usable) pycompat: assume python 2 and use stdlib fallbacks
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

# locate a usable Queue class, newest pycompat spelling first
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        # py2 stdlib fallback
        import Queue as queue
160
160
# for "historical portability":
# the log templater factory moved from cmdutil to logcmdutil; try the
# modern location first, then the old one, else disable template support.
try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        # neither location exists: callers must cope with None
        makelogtemplater = None
170
170
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel distinguishing "attribute absent" from None


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (attr given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


# monkey-patch so later code (and helpers below) can rely on it existing
setattr(util, 'safehasattr', safehasattr)
182
182
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # py3 always has perf_counter, so this branch wins there
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str; b'nt' only compares equal on
    # py2, so this branch is effectively py2-Windows-only
    util.timer = time.clock
else:
    util.timer = time.time
192
192
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)
201
201
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        # last-resort local definition mirroring the historical options
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like ``b"perf|p"`` into its list of aliases.

    Local stand-in for cmdutil.parsealiases (available since 1.5).
    """
    return cmd.split(b"|")
228
228
229
229
# Pick a @command decorator implementation depending on what this
# Mercurial provides, newest first.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo= by appending aliases to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # register (func, options[, synopsis]) in the table directly
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
# Register the perf.* config items when this Mercurial supports the
# configitem registrar; silently skip on versions without it.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    # registrar/configitems not available: run unregistered
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # older configitem() rejects experimental=; re-register without it
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335
335
336
336
def getlen(ui):
    """Return the length function to use for benchmark sizing.

    Under the experimental ``perf.stub`` test mode every container is
    reported as holding a single item so stub runs finish immediately;
    otherwise the builtin ``len`` is returned unchanged.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
341
341
342
342
class noop(object):
    """Context manager that does nothing on entry or exit.

    Stands in for a profiler context when profiling is disabled, so the
    timing loop can use ``with`` unconditionally.
    """

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


# shared do-nothing instance used as the default profiler context
NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                # hexfunc mirrors formatter behavior: full hash in debug mode
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # formatter is falsy so `if fm:` style checks treat it as plain
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<runcount>"; malformed entries warn and are
    # skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark (only first run profiled)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    # experimental config: perf.pre-run (warm-up iterations, not measured)
    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once without collecting timings.

    Substituted for ``_timer`` when the experimental ``perf.stub`` config
    is set; *fm* and *title* are accepted only for signature compatibility.
    """
    if setup is not None:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Time the enclosed block; yields a list receiving one result tuple.

    After the block exits, the yielded list holds a single
    ``(wallclock, user_cpu, system_cpu)`` tuple measured with util.timer
    and os.times().
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() fields: [0] user time, [1] system time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
497
497
498
498
# list of stop condition (elapsed time, minimal run count)
# checked in order by _timer: a benchmark stops once it has run for
# <time> seconds AND completed <count> iterations for some entry
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func*, then report the timings through formatter *fm*.

    *setup* (if given) runs before every invocation and is not timed.
    *prerun* warm-up iterations are executed unmeasured. Only the first
    measured iteration runs under *profiler*; afterwards the profiler is
    replaced by NOOPCTX. Iterations continue until some (time, mincount)
    entry in *limits* is satisfied.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # unmeasured warm-up runs
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only profile the first iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the last iteration's return value, shown as "! result: ..."
    formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place. The best entry is always shown; with *displayall* the max,
    average and median are shown as well, each field prefixed by its role.
    """
    n = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _emit(role, entry):
        # the "best" line carries bare field names; others get "role." prefix
        prefix = b'' if role == b'best' else b'%s.' % role
        wall, user, system = entry[0], entry[1], entry[2]
        fm.plain(b'!')
        for field, text, value in (
            (b'wall', b' wall %f', wall),
            (b'comb', b' comb %f', user + system),
            (b'user', b' user %f', user),
            (b'sys', b' sys %f', system),
        ):
            fm.write(prefix + field, text, value)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, n)
        fm.plain(b'\n')

    timings.sort()  # note: mutates the caller's list
    _emit(b'best', timings[0])
    if displayall:
        _emit(b'max', timings[-1])
        _emit(b'avg', tuple([sum(col) / n for col in zip(*timings)]))
        _emit(b'median', timings[len(timings) // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read config ``section.name`` as an int, or *default* when unset.

    Local replacement for ui.configint (available since 1.9 or
    fa2b596db182). Raises error.ConfigError when the value is present
    but not a valid integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        msg = b"%s.%s is not an integer ('%s')" % (section, name, raw)
        raise error.ConfigError(msg)
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Return a set/restore helper for attribute 'name' of 'obj'.

    This aborts when 'obj' lacks the 'name' attribute at runtime, so a
    future removal of the attribute — which would silently invalidate
    the performance measurement — is noticed instead of overlooked.

    The returned object can (1) assign a new value to the attribute and
    (2) restore the original value afterwards.

    With ignoremissing=True a missing attribute yields None instead of
    aborting, which is useful to examine an attribute that isn't
    guaranteed to exist in all Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    saved = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            # put back the value captured when this helper was created
            setattr(obj, _sysstr(name), saved)

    return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
def getbranchmapsubsettable():
    """Locate the branch-cache 'subsettable' mapping wherever it lives.

    for "historical portability", the table has moved over time:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older versions exposed the same thing as repo.sopener
    store_vfs = getattr(repo, 'svfs', None)
    if store_vfs:
        return store_vfs
    return getattr(repo, 'sopener')
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older versions exposed the same thing as repo.opener
    working_vfs = getattr(repo, 'vfs', None)
    if working_vfs:
        return working_vfs
    return getattr(repo, 'opener')
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # setattr(repo, '_tagscache', None) or similar would be wrong
        # here: existing code paths expect _tagscache to be a structured
        # object, so the cached value must be dropped from the instance
        # dict instead.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
def clearfilecache(obj, attrname):
    """Invalidate the @filecache'd property `attrname` on `obj`.

    Works on the unfiltered repository when `obj` supports it, since the
    file-backed cache lives on the unfiltered object.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    """Force the changelog to be reloaded from disk on next access."""
    if repo is not repo.unfiltered():
        # a filtered view caches its own changelog reference/key; reset
        # both so the view does not keep serving the stale object
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # time a full dirstate walk with a matcher built from PATS
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walk():
        entries = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(entries))

    timer(walk)
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # time a full annotate of file `f` at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the low-level dirstate.status() call directly
        dirstate = repo.dirstate
        matcher = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            st = dirstate.status(
                matcher, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(len, st))

        timer(status_dirstate)
    else:
        # benchmark the full repo.status() path
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
797
797
798
798
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove over the whole working copy
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Capture the old value *before* entering the try block: previously
    # this assignment lived inside the try, so if reading repo.ui.quiet
    # had raised, the finally clause would have hit an unbound local
    # (NameError) and masked the original error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # for "historical portability": scmutil.addremove grew a
        # `uipathfn` argument at some point; pick the matching call shape
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
816
816
817
817
def clearcaches(cl):
    """Drop revlog/index caches, coping with internal API changes."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
828
828
829
829
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start each run with cold changelog caches
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
845
845
846
846
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    # benchmark computing the repository's tag list, optionally with
    # cold changelog/manifest caches
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        # always drop the tags cache itself so each run recomputes it
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
869
869
870
870
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # time a full ancestor walk seeded with every changelog head
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def walk():
        for _ancestor in repo.changelog.ancestors(heads):
            pass

    timer(walk)
    fm.end()
883
883
884
884
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # time membership tests of REVSET's revisions against a lazy
    # ancestor set seeded with all heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def probe():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(probe)
    fm.end()
899
899
900
900
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # convert keyword-argument keys to bytes like every other perf
    # command in this file does; this was the only command passing
    # str-keyed opts to gettimer()/hg.peer()
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # a fresh peer per run, so discovery starts from scratch
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
917
917
918
918
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the parsed bookmarks so each run re-reads them from disk
        clearfilecache(repo, b'_bookmarks')

    def load():
        repo._bookmarks

    timer(load, setup=setup)
    fm.end()
941
941
942
942
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # --- benchmark factories -------------------------------------------

    def makebench(fn):
        # open + unpack the bundle, then hand the unpacker to ``fn``
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # consume the decoded bundle stream ``size`` bytes at a time
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle decoding entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-I/O benchmarks apply to every bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the format-specific benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1067
1067
1068
1068
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def generate():
        # drain the chunk generator; producing the chunks is the work
        # being measured
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(generate)

    fm.end()
1104
1104
1105
1105
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # time rebuilding the dirstate's directory-membership cache
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded up front

    def run():
        dirstate.hasdir(b'a')
        # drop the dirs cache so the next run rebuilds it from scratch
        del dirstate._map._dirs

    timer(run)
    fm.end()
1119
1119
1120
1120
@command(
    b'perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    # (docstring typos fixed: "benchmap"/"distate" -> "benchmark the
    # time of various dirstate operations", "were" -> "where")
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # force an initial load of the dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1183
1183
1184
1184
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    # (docstring typo fixed: "benchmap" -> "benchmark")
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")  # prime the dirstate itself

    def setup():
        # drop the dirs cache so each run rebuilds it
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1201
1201
1202
1202
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything except the cache under test
    dirstate._map.filefoldmap.get(b'a')

    def dropcache():
        del dirstate._map.filefoldmap

    def run():
        dirstate._map.filefoldmap.get(b'a')

    timer(run, setup=dropcache)
    fm.end()
1222
1222
1223
1223
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything except the caches under test
    dirstate._map.dirfoldmap.get(b'a')

    def dropcache():
        # dirfoldmap is derived from _dirs, so drop both
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def run():
        dirstate._map.dirfoldmap.get(b'a')

    timer(run, setup=dropcache)
    fm.end()
1244
1244
1245
1245
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # load the dirstate before timing
    b"a" in ds

    def markdirty():
        # the write is a no-op unless the dirstate believes it changed
        ds._dirty = True

    def run():
        ds.write(repo.currenttransaction())

    timer(run, setup=markdirty)
    fm.end()
1263
1263
1264
1264
def _getmergerevs(repo, opts):
    """parse command arguments and return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        wctx = repo[scmutil.revsingle(repo, opts[b'from'])]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        ancestor = repo[scmutil.revsingle(repo, opts[b'base'])]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1286
1286
1287
1287
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the update-calculation step of a merge"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1319
1319
1320
1320
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1343
1343
1344
1344
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside the timed section
    source = scmutil.revsingle(repo, rev1, rev1)
    target = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(source, target)

    timer(run)
    fm.end()
1358
1358
1359
1359
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # with --full, also pay the cost of re-reading the phase file
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1382
1382
1383
1383
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    ui.statusnoi18n(b'publishing: yes\n' if publishing else b'publishing: no\n')

    # prefer the new index API when present, fall back to the nodemap
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE(review): dict.iteritems is Python 2 only — confirm a compat shim
    # covers this on Python 3
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        if has_node(bin(nhex)) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def run():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(run)
    fm.end()
1442
1442
1443
1443
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # interpret `rev` as a changeset and take its manifest node
        t = scmutil.revsingle(repo, rev, rev).manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full 40-char hex string is a manifest node id
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older hg versions expose the revlog directly
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def run():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(run)
    fm.end()
1487
1487
1488
1488
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1501
1501
1502
1502
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop both the dirstate state and the cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # property access triggers (re)loading of the ignore rules
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1519
1519
1520
1520
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: `_byteskwargs` turns every key into bytes, so the former
        # str-keyed `opts['rev']` raised KeyError on Python 3; the Abort
        # message is made bytes for the same reason
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1582
1582
1583
1583
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: `_byteskwargs` turns every key into bytes, so the former
    # str-keyed `opts['clear_caches']` raised KeyError on Python 3; the
    # Abort message is made bytes for the same reason
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # use the new index.get_rev API when available (it replaces the
        # legacy nodemap.get interface)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1650
1654
1651
1655
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark a bare `hg version` invocation (interpreter startup cost)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # no inline env assignment on Windows cmd; set it explicitly
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
1668
1672
1669
1673
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1695
1699
1696
1700
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark fetching the file list of one changeset via its context"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[x].files())

    timer(run)
    fm.end()
1708
1712
1709
1713
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark fetching the file list of one changeset from the raw entry"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # index 3 of the parsed changelog entry is the file list
        len(cl.read(x)[3])

    timer(run)
    fm.end()
1722
1726
1723
1727
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1730
1734
1731
1735
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a stream of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run replays the identical edit stream
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
1769
1773
1770
1774
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1778
1782
1779
1783
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node -> rev lookup on a freshly opened changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def run():
        cl.rev(node)
        # drop caches so the next iteration starts cold again
        clearcaches(cl)

    timer(run)
    fm.end()
1796
1800
1797
1801
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    # benchmark a plain `hg log` run, optionally following renames
    opts = _byteskwargs(opts)
    revs = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    # swallow the log output so printing does not dominate the measurement
    ui.pushbuffer()

    def d():
        commands.log(
            ui, repo, rev=revs, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(d)
    ui.popbuffer()
    fm.end()
1815
1819
1816
1820
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        # iterate from tip down to the null revision
        tip = len(repo) - 1
        for rev in repo.changelog.revs(start=tip, stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1833
1837
1834
1838
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into the void so output cost does not dominate the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev') or [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1874
1878
1875
1879
1876 def _displaystats(ui, opts, entries, data):
1880 def _displaystats(ui, opts, entries, data):
1877 pass
1881 pass
1878 # use a second formatter because the data are quite different, not sure
1882 # use a second formatter because the data are quite different, not sure
1879 # how it flies with the templater.
1883 # how it flies with the templater.
1880 fm = ui.formatter(b'perf-stats', opts)
1884 fm = ui.formatter(b'perf-stats', opts)
1881 for key, title in entries:
1885 for key, title in entries:
1882 values = data[key]
1886 values = data[key]
1883 nbvalues = len(data)
1887 nbvalues = len(data)
1884 values.sort()
1888 values.sort()
1885 stats = {
1889 stats = {
1886 'key': key,
1890 'key': key,
1887 'title': title,
1891 'title': title,
1888 'nbitems': len(values),
1892 'nbitems': len(values),
1889 'min': values[0][0],
1893 'min': values[0][0],
1890 '10%': values[(nbvalues * 10) // 100][0],
1894 '10%': values[(nbvalues * 10) // 100][0],
1891 '25%': values[(nbvalues * 25) // 100][0],
1895 '25%': values[(nbvalues * 25) // 100][0],
1892 '50%': values[(nbvalues * 50) // 100][0],
1896 '50%': values[(nbvalues * 50) // 100][0],
1893 '75%': values[(nbvalues * 75) // 100][0],
1897 '75%': values[(nbvalues * 75) // 100][0],
1894 '80%': values[(nbvalues * 80) // 100][0],
1898 '80%': values[(nbvalues * 80) // 100][0],
1895 '85%': values[(nbvalues * 85) // 100][0],
1899 '85%': values[(nbvalues * 85) // 100][0],
1896 '90%': values[(nbvalues * 90) // 100][0],
1900 '90%': values[(nbvalues * 90) // 100][0],
1897 '95%': values[(nbvalues * 95) // 100][0],
1901 '95%': values[(nbvalues * 95) // 100][0],
1898 '99%': values[(nbvalues * 99) // 100][0],
1902 '99%': values[(nbvalues * 99) // 100][0],
1899 'max': values[-1][0],
1903 'max': values[-1][0],
1900 }
1904 }
1901 fm.startitem()
1905 fm.startitem()
1902 fm.data(**stats)
1906 fm.data(**stats)
1903 # make node pretty for the human output
1907 # make node pretty for the human output
1904 fm.plain('### %s (%d items)\n' % (title, len(values)))
1908 fm.plain('### %s (%d items)\n' % (title, len(values)))
1905 lines = [
1909 lines = [
1906 'min',
1910 'min',
1907 '10%',
1911 '10%',
1908 '25%',
1912 '25%',
1909 '50%',
1913 '50%',
1910 '75%',
1914 '75%',
1911 '80%',
1915 '80%',
1912 '85%',
1916 '85%',
1913 '90%',
1917 '90%',
1914 '95%',
1918 '95%',
1915 '99%',
1919 '99%',
1916 'max',
1920 'max',
1917 ]
1921 ]
1918 for l in lines:
1922 for l in lines:
1919 fm.plain('%s: %s\n' % (l, stats[l]))
1923 fm.plain('%s: %s\n' % (l, stats[l]))
1920 fm.end()
1924 fm.end()
1921
1925
1922
1926
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename/time columns carry no data
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge revisions have the (base, p1, p2) triplets we care about
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # NOTE(review): the dict uses bytes keys while later lookups use
            # native str keys; this only matches on Python 2 — confirm
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # take the end time *before* computing the delta: the
                # previous code read a stale ``end`` value here (captured
                # before pathcopies ran), reporting a bogus p2 timing
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2104
2108
2105
2109
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # two output layouts: the --timing one adds rename count and duration
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions are interesting: they give (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace between this base and parent
                    continue
                # NOTE(review): bytes keys here vs str-keyed lookups below
                # only match on Python 2 — confirm intended
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2231
2235
2232
2236
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # benchmark building a case-collision auditor over the dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2239
2243
2240
2244
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # benchmark parsing the fncache file from disk
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2252
2256
2253
2257
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # benchmark writing the fncache file inside a throwaway transaction
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # force a rewrite even though nothing changed
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        # previously the lock was leaked if the benchmark or the
        # transaction close raised; always release it
        lock.release()
    fm.end()
2272
2276
2273
2277
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # benchmark path encoding of every entry currently in the fncache
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def d():
        encode = store.encode
        for path in store.fncache.entries:
            encode(path)

    timer(d)
    fm.end()
2287
2291
2288
2292
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for threaded perfbdiff runs: consume (text1, text2) pairs
    # from ``q`` and diff each one until a ``None`` sentinel is received,
    # then park on the ``ready`` condition until the controller either
    # queues more work or sets ``done``.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # pick the diff flavor requested on the command line
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2304
2308
2305
2309
def _manifestrevision(repo, mnode):
    # Return the raw manifest text for node ``mnode``, going through the
    # modern per-tree storage API when available and falling back to the
    # legacy private revlog attribute on older Mercurial versions.
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2315
2319
2316
2320
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # With -c/-m, the positional argument is actually the revision.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Collect all (old, new) text pairs up front so only the diffing itself
    # is timed below.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # Threaded mode: workers consume pairs from ``q``; a ``None`` item
        # marks the end of one timed batch.  Between batches workers block on
        # ``ready`` until notified, and ``done`` tells them to exit for good.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        # Wait for the initial ``None`` items to be consumed so all workers
        # are parked on ``ready`` before timing starts.
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Tear the worker threads down: signal exit, then wake everyone up.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2431
2435
2432
2436
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # With -c/-m the positional argument is the revision itself.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Gather all (left, right) text pairs first so only diffing is timed.
    startrev = r.rev(r.lookup(rev))
    for testedrev in range(startrev, min(startrev + count, len(r) - 1)):
        if not opts[b'alldata']:
            deltaparent = r.deltaparent(testedrev)
            textpairs.append(
                (r.revision(deltaparent), r.revision(testedrev))
            )
            continue

        # Changeset mode: first the manifest texts against each parent...
        ctx = repo[testedrev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            parenttext = _manifestrevision(repo, pctx.manifestnode())
            textpairs.append((parenttext, mtext))

        # ... then every filelog revision touched by the manifest delta.
        curmanifest = ctx.manifest()
        parentmanifest = ctx.p1().manifest()
        for fname, change in parentmanifest.diff(curmanifest).items():
            flog = repo.file(fname)
            oldtext = flog.revision(change[0][0] or -1)
            newtext = flog.revision(change[1][0] or -1)
            textpairs.append((oldtext, newtext))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2511
2515
2512
2516
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # Single-letter diff flag -> corresponding diff option name.
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # Benchmark the plain diff plus each whitespace-option combination.
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {flagnames[flag]: b'1' for flag in diffopt}

        def d():
            # Swallow the diff output; we only care about the timing.
            ui.pushbuffer()
            commands.diff(ui, repo, **diffkwargs)
            ui.popbuffer()

        label = diffopt.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(d, title=title)
    fm.end()
2536
2540
2537
2541
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # First four bytes of a revlog are flags (high 16 bits) + version
    # (low 16 bits); only format version 1 is supported here.
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # Sample nodes at fixed positions to measure lookup cost along the log.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # Cost of instantiating a revlog object.
        revlog.revlog(opener, indexfile)

    def read():
        # Raw cost of reading the index file from the opener.
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # Cost of parsing the index data into an index object.
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        # Parse plus a single entry lookup (value intentionally discarded).
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        # Parse once, then look up every rev ``count`` times to expose
        # caching effects.
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = getattr(revlogio.parseindex(data, inline)[0], 'nodemap', None)
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            # Missing nodes are expected (see 'look up missing node' bench).
            pass

    def resolvenodes(nodes, count=1):
        nodemap = getattr(revlogio.parseindex(data, inline)[0], 'nodemap', None)
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2658
2662
2659
2663
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # Negative start revisions count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last, step = rllen - 1, startrev - 1, -1 * step
        else:
            first, last = startrev, rllen

        for cur in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(cur))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2708
2712
2709
2713
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # (message typo fixed: was 'invalide run count')
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # ``results`` becomes [(rev, [timing-run-1, timing-run-2, ...]), ...],
    # pairing up the per-revision timings from each pass.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: this entry previously computed ``resultcount * 70 // 100``,
        # so the line labelled "50%" actually showed the 70th percentile.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2851
2855
2852
2856
2853 class _faketr(object):
2857 class _faketr(object):
2854 def add(s, x, y, z=None):
2858 def add(s, x, y, z=None):
2855 return None
2859 return None
2856
2860
2857
2861
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions [startrev, stoprev] of ``orig`` into a temporary
    revlog, timing each ``addrawrevision()`` call individually.

    ``source`` selects how the revision data is fed (full text, parent
    delta, ...) via ``_getrevisionseed``.  ``runidx``, when given, is only
    used to label the progress bar for this pass.

    Returns a list of ``(rev, timing)`` pairs, one per written revision,
    where ``timing`` is the measurement produced by ``timeone()``.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:
            # Legacy API: drive ui.progress() directly.

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # Prepare the write arguments *outside* the timed section so
            # only the actual addrawrevision() call is measured.
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2907
2911
2908
2912
2909 def _getrevisionseed(orig, rev, tr, source):
2913 def _getrevisionseed(orig, rev, tr, source):
2910 from mercurial.node import nullid
2914 from mercurial.node import nullid
2911
2915
2912 linkrev = orig.linkrev(rev)
2916 linkrev = orig.linkrev(rev)
2913 node = orig.node(rev)
2917 node = orig.node(rev)
2914 p1, p2 = orig.parents(node)
2918 p1, p2 = orig.parents(node)
2915 flags = orig.flags(rev)
2919 flags = orig.flags(rev)
2916 cachedelta = None
2920 cachedelta = None
2917 text = None
2921 text = None
2918
2922
2919 if source == b'full':
2923 if source == b'full':
2920 text = orig.revision(rev)
2924 text = orig.revision(rev)
2921 elif source == b'parent-1':
2925 elif source == b'parent-1':
2922 baserev = orig.rev(p1)
2926 baserev = orig.rev(p1)
2923 cachedelta = (baserev, orig.revdiff(p1, rev))
2927 cachedelta = (baserev, orig.revdiff(p1, rev))
2924 elif source == b'parent-2':
2928 elif source == b'parent-2':
2925 parent = p2
2929 parent = p2
2926 if p2 == nullid:
2930 if p2 == nullid:
2927 parent = p1
2931 parent = p1
2928 baserev = orig.rev(parent)
2932 baserev = orig.rev(parent)
2929 cachedelta = (baserev, orig.revdiff(parent, rev))
2933 cachedelta = (baserev, orig.revdiff(parent, rev))
2930 elif source == b'parent-smallest':
2934 elif source == b'parent-smallest':
2931 p1diff = orig.revdiff(p1, rev)
2935 p1diff = orig.revdiff(p1, rev)
2932 parent = p1
2936 parent = p1
2933 diff = p1diff
2937 diff = p1diff
2934 if p2 != nullid:
2938 if p2 != nullid:
2935 p2diff = orig.revdiff(p2, rev)
2939 p2diff = orig.revdiff(p2, rev)
2936 if len(p1diff) > len(p2diff):
2940 if len(p1diff) > len(p2diff):
2937 parent = p2
2941 parent = p2
2938 diff = p2diff
2942 diff = p2diff
2939 baserev = orig.rev(parent)
2943 baserev = orig.rev(parent)
2940 cachedelta = (baserev, diff)
2944 cachedelta = (baserev, diff)
2941 elif source == b'storage':
2945 elif source == b'storage':
2942 baserev = orig.deltaparent(rev)
2946 baserev = orig.deltaparent(rev)
2943 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2947 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2944
2948
2945 return (
2949 return (
2946 (text, tr, linkrev, p1, p2),
2950 (text, tr, linkrev, p1, p2),
2947 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
2951 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
2948 )
2952 )
2949
2953
2950
2954
2951 @contextlib.contextmanager
2955 @contextlib.contextmanager
2952 def _temprevlog(ui, orig, truncaterev):
2956 def _temprevlog(ui, orig, truncaterev):
2953 from mercurial import vfs as vfsmod
2957 from mercurial import vfs as vfsmod
2954
2958
2955 if orig._inline:
2959 if orig._inline:
2956 raise error.Abort('not supporting inline revlog (yet)')
2960 raise error.Abort('not supporting inline revlog (yet)')
2957 revlogkwargs = {}
2961 revlogkwargs = {}
2958 k = 'upperboundcomp'
2962 k = 'upperboundcomp'
2959 if util.safehasattr(orig, k):
2963 if util.safehasattr(orig, k):
2960 revlogkwargs[k] = getattr(orig, k)
2964 revlogkwargs[k] = getattr(orig, k)
2961
2965
2962 origindexpath = orig.opener.join(orig.indexfile)
2966 origindexpath = orig.opener.join(orig.indexfile)
2963 origdatapath = orig.opener.join(orig.datafile)
2967 origdatapath = orig.opener.join(orig.datafile)
2964 indexname = 'revlog.i'
2968 indexname = 'revlog.i'
2965 dataname = 'revlog.d'
2969 dataname = 'revlog.d'
2966
2970
2967 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2971 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2968 try:
2972 try:
2969 # copy the data file in a temporary directory
2973 # copy the data file in a temporary directory
2970 ui.debug('copying data in %s\n' % tmpdir)
2974 ui.debug('copying data in %s\n' % tmpdir)
2971 destindexpath = os.path.join(tmpdir, 'revlog.i')
2975 destindexpath = os.path.join(tmpdir, 'revlog.i')
2972 destdatapath = os.path.join(tmpdir, 'revlog.d')
2976 destdatapath = os.path.join(tmpdir, 'revlog.d')
2973 shutil.copyfile(origindexpath, destindexpath)
2977 shutil.copyfile(origindexpath, destindexpath)
2974 shutil.copyfile(origdatapath, destdatapath)
2978 shutil.copyfile(origdatapath, destdatapath)
2975
2979
2976 # remove the data we want to add again
2980 # remove the data we want to add again
2977 ui.debug('truncating data to be rewritten\n')
2981 ui.debug('truncating data to be rewritten\n')
2978 with open(destindexpath, 'ab') as index:
2982 with open(destindexpath, 'ab') as index:
2979 index.seek(0)
2983 index.seek(0)
2980 index.truncate(truncaterev * orig._io.size)
2984 index.truncate(truncaterev * orig._io.size)
2981 with open(destdatapath, 'ab') as data:
2985 with open(destdatapath, 'ab') as data:
2982 data.seek(0)
2986 data.seek(0)
2983 data.truncate(orig.start(truncaterev))
2987 data.truncate(orig.start(truncaterev))
2984
2988
2985 # instantiate a new revlog from the temporary copy
2989 # instantiate a new revlog from the temporary copy
2986 ui.debug('truncating adding to be rewritten\n')
2990 ui.debug('truncating adding to be rewritten\n')
2987 vfs = vfsmod.vfs(tmpdir)
2991 vfs = vfsmod.vfs(tmpdir)
2988 vfs.options = getattr(orig.opener, 'options', None)
2992 vfs.options = getattr(orig.opener, 'options', None)
2989
2993
2990 dest = revlog.revlog(
2994 dest = revlog.revlog(
2991 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
2995 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
2992 )
2996 )
2993 if dest._inline:
2997 if dest._inline:
2994 raise error.Abort('not supporting inline revlog (yet)')
2998 raise error.Abort('not supporting inline revlog (yet)')
2995 # make sure internals are initialized
2999 # make sure internals are initialized
2996 dest.revision(len(dest) - 1)
3000 dest.revision(len(dest) - 1)
2997 yield dest
3001 yield dest
2998 del dest, vfs
3002 del dest, vfs
2999 finally:
3003 finally:
3000 shutil.rmtree(tmpdir, True)
3004 shutil.rmtree(tmpdir, True)
3001
3005
3002
3006
3003 @command(
3007 @command(
3004 b'perfrevlogchunks',
3008 b'perfrevlogchunks',
3005 revlogopts
3009 revlogopts
3006 + formatteropts
3010 + formatteropts
3007 + [
3011 + [
3008 (b'e', b'engines', b'', b'compression engines to use'),
3012 (b'e', b'engines', b'', b'compression engines to use'),
3009 (b's', b'startrev', 0, b'revision to start at'),
3013 (b's', b'startrev', 0, b'revision to start at'),
3010 ],
3014 ],
3011 b'-c|-m|FILE',
3015 b'-c|-m|FILE',
3012 )
3016 )
3013 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3017 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3014 """Benchmark operations on revlog chunks.
3018 """Benchmark operations on revlog chunks.
3015
3019
3016 Logically, each revlog is a collection of fulltext revisions. However,
3020 Logically, each revlog is a collection of fulltext revisions. However,
3017 stored within each revlog are "chunks" of possibly compressed data. This
3021 stored within each revlog are "chunks" of possibly compressed data. This
3018 data needs to be read and decompressed or compressed and written.
3022 data needs to be read and decompressed or compressed and written.
3019
3023
3020 This command measures the time it takes to read+decompress and recompress
3024 This command measures the time it takes to read+decompress and recompress
3021 chunks in a revlog. It effectively isolates I/O and compression performance.
3025 chunks in a revlog. It effectively isolates I/O and compression performance.
3022 For measurements of higher-level operations like resolving revisions,
3026 For measurements of higher-level operations like resolving revisions,
3023 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3027 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3024 """
3028 """
3025 opts = _byteskwargs(opts)
3029 opts = _byteskwargs(opts)
3026
3030
3027 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3031 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3028
3032
3029 # _chunkraw was renamed to _getsegmentforrevs.
3033 # _chunkraw was renamed to _getsegmentforrevs.
3030 try:
3034 try:
3031 segmentforrevs = rl._getsegmentforrevs
3035 segmentforrevs = rl._getsegmentforrevs
3032 except AttributeError:
3036 except AttributeError:
3033 segmentforrevs = rl._chunkraw
3037 segmentforrevs = rl._chunkraw
3034
3038
3035 # Verify engines argument.
3039 # Verify engines argument.
3036 if engines:
3040 if engines:
3037 engines = set(e.strip() for e in engines.split(b','))
3041 engines = set(e.strip() for e in engines.split(b','))
3038 for engine in engines:
3042 for engine in engines:
3039 try:
3043 try:
3040 util.compressionengines[engine]
3044 util.compressionengines[engine]
3041 except KeyError:
3045 except KeyError:
3042 raise error.Abort(b'unknown compression engine: %s' % engine)
3046 raise error.Abort(b'unknown compression engine: %s' % engine)
3043 else:
3047 else:
3044 engines = []
3048 engines = []
3045 for e in util.compengines:
3049 for e in util.compengines:
3046 engine = util.compengines[e]
3050 engine = util.compengines[e]
3047 try:
3051 try:
3048 if engine.available():
3052 if engine.available():
3049 engine.revlogcompressor().compress(b'dummy')
3053 engine.revlogcompressor().compress(b'dummy')
3050 engines.append(e)
3054 engines.append(e)
3051 except NotImplementedError:
3055 except NotImplementedError:
3052 pass
3056 pass
3053
3057
3054 revs = list(rl.revs(startrev, len(rl) - 1))
3058 revs = list(rl.revs(startrev, len(rl) - 1))
3055
3059
3056 def rlfh(rl):
3060 def rlfh(rl):
3057 if rl._inline:
3061 if rl._inline:
3058 return getsvfs(repo)(rl.indexfile)
3062 return getsvfs(repo)(rl.indexfile)
3059 else:
3063 else:
3060 return getsvfs(repo)(rl.datafile)
3064 return getsvfs(repo)(rl.datafile)
3061
3065
3062 def doread():
3066 def doread():
3063 rl.clearcaches()
3067 rl.clearcaches()
3064 for rev in revs:
3068 for rev in revs:
3065 segmentforrevs(rev, rev)
3069 segmentforrevs(rev, rev)
3066
3070
3067 def doreadcachedfh():
3071 def doreadcachedfh():
3068 rl.clearcaches()
3072 rl.clearcaches()
3069 fh = rlfh(rl)
3073 fh = rlfh(rl)
3070 for rev in revs:
3074 for rev in revs:
3071 segmentforrevs(rev, rev, df=fh)
3075 segmentforrevs(rev, rev, df=fh)
3072
3076
3073 def doreadbatch():
3077 def doreadbatch():
3074 rl.clearcaches()
3078 rl.clearcaches()
3075 segmentforrevs(revs[0], revs[-1])
3079 segmentforrevs(revs[0], revs[-1])
3076
3080
3077 def doreadbatchcachedfh():
3081 def doreadbatchcachedfh():
3078 rl.clearcaches()
3082 rl.clearcaches()
3079 fh = rlfh(rl)
3083 fh = rlfh(rl)
3080 segmentforrevs(revs[0], revs[-1], df=fh)
3084 segmentforrevs(revs[0], revs[-1], df=fh)
3081
3085
3082 def dochunk():
3086 def dochunk():
3083 rl.clearcaches()
3087 rl.clearcaches()
3084 fh = rlfh(rl)
3088 fh = rlfh(rl)
3085 for rev in revs:
3089 for rev in revs:
3086 rl._chunk(rev, df=fh)
3090 rl._chunk(rev, df=fh)
3087
3091
3088 chunks = [None]
3092 chunks = [None]
3089
3093
3090 def dochunkbatch():
3094 def dochunkbatch():
3091 rl.clearcaches()
3095 rl.clearcaches()
3092 fh = rlfh(rl)
3096 fh = rlfh(rl)
3093 # Save chunks as a side-effect.
3097 # Save chunks as a side-effect.
3094 chunks[0] = rl._chunks(revs, df=fh)
3098 chunks[0] = rl._chunks(revs, df=fh)
3095
3099
3096 def docompress(compressor):
3100 def docompress(compressor):
3097 rl.clearcaches()
3101 rl.clearcaches()
3098
3102
3099 try:
3103 try:
3100 # Swap in the requested compression engine.
3104 # Swap in the requested compression engine.
3101 oldcompressor = rl._compressor
3105 oldcompressor = rl._compressor
3102 rl._compressor = compressor
3106 rl._compressor = compressor
3103 for chunk in chunks[0]:
3107 for chunk in chunks[0]:
3104 rl.compress(chunk)
3108 rl.compress(chunk)
3105 finally:
3109 finally:
3106 rl._compressor = oldcompressor
3110 rl._compressor = oldcompressor
3107
3111
3108 benches = [
3112 benches = [
3109 (lambda: doread(), b'read'),
3113 (lambda: doread(), b'read'),
3110 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3114 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3111 (lambda: doreadbatch(), b'read batch'),
3115 (lambda: doreadbatch(), b'read batch'),
3112 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3116 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3113 (lambda: dochunk(), b'chunk'),
3117 (lambda: dochunk(), b'chunk'),
3114 (lambda: dochunkbatch(), b'chunk batch'),
3118 (lambda: dochunkbatch(), b'chunk batch'),
3115 ]
3119 ]
3116
3120
3117 for engine in sorted(engines):
3121 for engine in sorted(engines):
3118 compressor = util.compengines[engine].revlogcompressor()
3122 compressor = util.compengines[engine].revlogcompressor()
3119 benches.append(
3123 benches.append(
3120 (
3124 (
3121 functools.partial(docompress, compressor),
3125 functools.partial(docompress, compressor),
3122 b'compress w/ %s' % engine,
3126 b'compress w/ %s' % engine,
3123 )
3127 )
3124 )
3128 )
3125
3129
3126 for fn, title in benches:
3130 for fn, title in benches:
3127 timer, fm = gettimer(ui, opts)
3131 timer, fm = gettimer(ui, opts)
3128 timer(fn, title=title)
3132 timer(fn, title=title)
3129 fm.end()
3133 fm.end()
3130
3134
3131
3135
3132 @command(
3136 @command(
3133 b'perfrevlogrevision',
3137 b'perfrevlogrevision',
3134 revlogopts
3138 revlogopts
3135 + formatteropts
3139 + formatteropts
3136 + [(b'', b'cache', False, b'use caches instead of clearing')],
3140 + [(b'', b'cache', False, b'use caches instead of clearing')],
3137 b'-c|-m|FILE REV',
3141 b'-c|-m|FILE REV',
3138 )
3142 )
3139 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3143 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3140 """Benchmark obtaining a revlog revision.
3144 """Benchmark obtaining a revlog revision.
3141
3145
3142 Obtaining a revlog revision consists of roughly the following steps:
3146 Obtaining a revlog revision consists of roughly the following steps:
3143
3147
3144 1. Compute the delta chain
3148 1. Compute the delta chain
3145 2. Slice the delta chain if applicable
3149 2. Slice the delta chain if applicable
3146 3. Obtain the raw chunks for that delta chain
3150 3. Obtain the raw chunks for that delta chain
3147 4. Decompress each raw chunk
3151 4. Decompress each raw chunk
3148 5. Apply binary patches to obtain fulltext
3152 5. Apply binary patches to obtain fulltext
3149 6. Verify hash of fulltext
3153 6. Verify hash of fulltext
3150
3154
3151 This command measures the time spent in each of these phases.
3155 This command measures the time spent in each of these phases.
3152 """
3156 """
3153 opts = _byteskwargs(opts)
3157 opts = _byteskwargs(opts)
3154
3158
3155 if opts.get(b'changelog') or opts.get(b'manifest'):
3159 if opts.get(b'changelog') or opts.get(b'manifest'):
3156 file_, rev = None, file_
3160 file_, rev = None, file_
3157 elif rev is None:
3161 elif rev is None:
3158 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3162 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3159
3163
3160 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3164 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3161
3165
3162 # _chunkraw was renamed to _getsegmentforrevs.
3166 # _chunkraw was renamed to _getsegmentforrevs.
3163 try:
3167 try:
3164 segmentforrevs = r._getsegmentforrevs
3168 segmentforrevs = r._getsegmentforrevs
3165 except AttributeError:
3169 except AttributeError:
3166 segmentforrevs = r._chunkraw
3170 segmentforrevs = r._chunkraw
3167
3171
3168 node = r.lookup(rev)
3172 node = r.lookup(rev)
3169 rev = r.rev(node)
3173 rev = r.rev(node)
3170
3174
3171 def getrawchunks(data, chain):
3175 def getrawchunks(data, chain):
3172 start = r.start
3176 start = r.start
3173 length = r.length
3177 length = r.length
3174 inline = r._inline
3178 inline = r._inline
3175 iosize = r._io.size
3179 iosize = r._io.size
3176 buffer = util.buffer
3180 buffer = util.buffer
3177
3181
3178 chunks = []
3182 chunks = []
3179 ladd = chunks.append
3183 ladd = chunks.append
3180 for idx, item in enumerate(chain):
3184 for idx, item in enumerate(chain):
3181 offset = start(item[0])
3185 offset = start(item[0])
3182 bits = data[idx]
3186 bits = data[idx]
3183 for rev in item:
3187 for rev in item:
3184 chunkstart = start(rev)
3188 chunkstart = start(rev)
3185 if inline:
3189 if inline:
3186 chunkstart += (rev + 1) * iosize
3190 chunkstart += (rev + 1) * iosize
3187 chunklength = length(rev)
3191 chunklength = length(rev)
3188 ladd(buffer(bits, chunkstart - offset, chunklength))
3192 ladd(buffer(bits, chunkstart - offset, chunklength))
3189
3193
3190 return chunks
3194 return chunks
3191
3195
3192 def dodeltachain(rev):
3196 def dodeltachain(rev):
3193 if not cache:
3197 if not cache:
3194 r.clearcaches()
3198 r.clearcaches()
3195 r._deltachain(rev)
3199 r._deltachain(rev)
3196
3200
3197 def doread(chain):
3201 def doread(chain):
3198 if not cache:
3202 if not cache:
3199 r.clearcaches()
3203 r.clearcaches()
3200 for item in slicedchain:
3204 for item in slicedchain:
3201 segmentforrevs(item[0], item[-1])
3205 segmentforrevs(item[0], item[-1])
3202
3206
3203 def doslice(r, chain, size):
3207 def doslice(r, chain, size):
3204 for s in slicechunk(r, chain, targetsize=size):
3208 for s in slicechunk(r, chain, targetsize=size):
3205 pass
3209 pass
3206
3210
3207 def dorawchunks(data, chain):
3211 def dorawchunks(data, chain):
3208 if not cache:
3212 if not cache:
3209 r.clearcaches()
3213 r.clearcaches()
3210 getrawchunks(data, chain)
3214 getrawchunks(data, chain)
3211
3215
3212 def dodecompress(chunks):
3216 def dodecompress(chunks):
3213 decomp = r.decompress
3217 decomp = r.decompress
3214 for chunk in chunks:
3218 for chunk in chunks:
3215 decomp(chunk)
3219 decomp(chunk)
3216
3220
3217 def dopatch(text, bins):
3221 def dopatch(text, bins):
3218 if not cache:
3222 if not cache:
3219 r.clearcaches()
3223 r.clearcaches()
3220 mdiff.patches(text, bins)
3224 mdiff.patches(text, bins)
3221
3225
3222 def dohash(text):
3226 def dohash(text):
3223 if not cache:
3227 if not cache:
3224 r.clearcaches()
3228 r.clearcaches()
3225 r.checkhash(text, node, rev=rev)
3229 r.checkhash(text, node, rev=rev)
3226
3230
3227 def dorevision():
3231 def dorevision():
3228 if not cache:
3232 if not cache:
3229 r.clearcaches()
3233 r.clearcaches()
3230 r.revision(node)
3234 r.revision(node)
3231
3235
3232 try:
3236 try:
3233 from mercurial.revlogutils.deltas import slicechunk
3237 from mercurial.revlogutils.deltas import slicechunk
3234 except ImportError:
3238 except ImportError:
3235 slicechunk = getattr(revlog, '_slicechunk', None)
3239 slicechunk = getattr(revlog, '_slicechunk', None)
3236
3240
3237 size = r.length(rev)
3241 size = r.length(rev)
3238 chain = r._deltachain(rev)[0]
3242 chain = r._deltachain(rev)[0]
3239 if not getattr(r, '_withsparseread', False):
3243 if not getattr(r, '_withsparseread', False):
3240 slicedchain = (chain,)
3244 slicedchain = (chain,)
3241 else:
3245 else:
3242 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3246 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3243 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3247 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3244 rawchunks = getrawchunks(data, slicedchain)
3248 rawchunks = getrawchunks(data, slicedchain)
3245 bins = r._chunks(chain)
3249 bins = r._chunks(chain)
3246 text = bytes(bins[0])
3250 text = bytes(bins[0])
3247 bins = bins[1:]
3251 bins = bins[1:]
3248 text = mdiff.patches(text, bins)
3252 text = mdiff.patches(text, bins)
3249
3253
3250 benches = [
3254 benches = [
3251 (lambda: dorevision(), b'full'),
3255 (lambda: dorevision(), b'full'),
3252 (lambda: dodeltachain(rev), b'deltachain'),
3256 (lambda: dodeltachain(rev), b'deltachain'),
3253 (lambda: doread(chain), b'read'),
3257 (lambda: doread(chain), b'read'),
3254 ]
3258 ]
3255
3259
3256 if getattr(r, '_withsparseread', False):
3260 if getattr(r, '_withsparseread', False):
3257 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3261 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3258 benches.append(slicing)
3262 benches.append(slicing)
3259
3263
3260 benches.extend(
3264 benches.extend(
3261 [
3265 [
3262 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3266 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3263 (lambda: dodecompress(rawchunks), b'decompress'),
3267 (lambda: dodecompress(rawchunks), b'decompress'),
3264 (lambda: dopatch(text, bins), b'patch'),
3268 (lambda: dopatch(text, bins), b'patch'),
3265 (lambda: dohash(text), b'hash'),
3269 (lambda: dohash(text), b'hash'),
3266 ]
3270 ]
3267 )
3271 )
3268
3272
3269 timer, fm = gettimer(ui, opts)
3273 timer, fm = gettimer(ui, opts)
3270 for fn, title in benches:
3274 for fn, title in benches:
3271 timer(fn, title=title)
3275 timer(fn, title=title)
3272 fm.end()
3276 fm.end()
3273
3277
3274
3278
3275 @command(
3279 @command(
3276 b'perfrevset',
3280 b'perfrevset',
3277 [
3281 [
3278 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3282 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3279 (b'', b'contexts', False, b'obtain changectx for each revision'),
3283 (b'', b'contexts', False, b'obtain changectx for each revision'),
3280 ]
3284 ]
3281 + formatteropts,
3285 + formatteropts,
3282 b"REVSET",
3286 b"REVSET",
3283 )
3287 )
3284 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3288 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3285 """benchmark the execution time of a revset
3289 """benchmark the execution time of a revset
3286
3290
3287 Use the --clean option if need to evaluate the impact of build volatile
3291 Use the --clean option if need to evaluate the impact of build volatile
3288 revisions set cache on the revset execution. Volatile cache hold filtered
3292 revisions set cache on the revset execution. Volatile cache hold filtered
3289 and obsolete related cache."""
3293 and obsolete related cache."""
3290 opts = _byteskwargs(opts)
3294 opts = _byteskwargs(opts)
3291
3295
3292 timer, fm = gettimer(ui, opts)
3296 timer, fm = gettimer(ui, opts)
3293
3297
3294 def d():
3298 def d():
3295 if clear:
3299 if clear:
3296 repo.invalidatevolatilesets()
3300 repo.invalidatevolatilesets()
3297 if contexts:
3301 if contexts:
3298 for ctx in repo.set(expr):
3302 for ctx in repo.set(expr):
3299 pass
3303 pass
3300 else:
3304 else:
3301 for r in repo.revs(expr):
3305 for r in repo.revs(expr):
3302 pass
3306 pass
3303
3307
3304 timer(d)
3308 timer(d)
3305 fm.end()
3309 fm.end()
3306
3310
3307
3311
3308 @command(
3312 @command(
3309 b'perfvolatilesets',
3313 b'perfvolatilesets',
3310 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3314 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3311 + formatteropts,
3315 + formatteropts,
3312 )
3316 )
3313 def perfvolatilesets(ui, repo, *names, **opts):
3317 def perfvolatilesets(ui, repo, *names, **opts):
3314 """benchmark the computation of various volatile set
3318 """benchmark the computation of various volatile set
3315
3319
3316 Volatile set computes element related to filtering and obsolescence."""
3320 Volatile set computes element related to filtering and obsolescence."""
3317 opts = _byteskwargs(opts)
3321 opts = _byteskwargs(opts)
3318 timer, fm = gettimer(ui, opts)
3322 timer, fm = gettimer(ui, opts)
3319 repo = repo.unfiltered()
3323 repo = repo.unfiltered()
3320
3324
3321 def getobs(name):
3325 def getobs(name):
3322 def d():
3326 def d():
3323 repo.invalidatevolatilesets()
3327 repo.invalidatevolatilesets()
3324 if opts[b'clear_obsstore']:
3328 if opts[b'clear_obsstore']:
3325 clearfilecache(repo, b'obsstore')
3329 clearfilecache(repo, b'obsstore')
3326 obsolete.getrevs(repo, name)
3330 obsolete.getrevs(repo, name)
3327
3331
3328 return d
3332 return d
3329
3333
3330 allobs = sorted(obsolete.cachefuncs)
3334 allobs = sorted(obsolete.cachefuncs)
3331 if names:
3335 if names:
3332 allobs = [n for n in allobs if n in names]
3336 allobs = [n for n in allobs if n in names]
3333
3337
3334 for name in allobs:
3338 for name in allobs:
3335 timer(getobs(name), title=name)
3339 timer(getobs(name), title=name)
3336
3340
3337 def getfiltered(name):
3341 def getfiltered(name):
3338 def d():
3342 def d():
3339 repo.invalidatevolatilesets()
3343 repo.invalidatevolatilesets()
3340 if opts[b'clear_obsstore']:
3344 if opts[b'clear_obsstore']:
3341 clearfilecache(repo, b'obsstore')
3345 clearfilecache(repo, b'obsstore')
3342 repoview.filterrevs(repo, name)
3346 repoview.filterrevs(repo, name)
3343
3347
3344 return d
3348 return d
3345
3349
3346 allfilter = sorted(repoview.filtertable)
3350 allfilter = sorted(repoview.filtertable)
3347 if names:
3351 if names:
3348 allfilter = [n for n in allfilter if n in names]
3352 allfilter = [n for n in allfilter if n in names]
3349
3353
3350 for name in allfilter:
3354 for name in allfilter:
3351 timer(getfiltered(name), title=name)
3355 timer(getfiltered(name), title=name)
3352 fm.end()
3356 fm.end()
3353
3357
3354
3358
3355 @command(
3359 @command(
3356 b'perfbranchmap',
3360 b'perfbranchmap',
3357 [
3361 [
3358 (b'f', b'full', False, b'Includes build time of subset'),
3362 (b'f', b'full', False, b'Includes build time of subset'),
3359 (
3363 (
3360 b'',
3364 b'',
3361 b'clear-revbranch',
3365 b'clear-revbranch',
3362 False,
3366 False,
3363 b'purge the revbranch cache between computation',
3367 b'purge the revbranch cache between computation',
3364 ),
3368 ),
3365 ]
3369 ]
3366 + formatteropts,
3370 + formatteropts,
3367 )
3371 )
3368 def perfbranchmap(ui, repo, *filternames, **opts):
3372 def perfbranchmap(ui, repo, *filternames, **opts):
3369 """benchmark the update of a branchmap
3373 """benchmark the update of a branchmap
3370
3374
3371 This benchmarks the full repo.branchmap() call with read and write disabled
3375 This benchmarks the full repo.branchmap() call with read and write disabled
3372 """
3376 """
3373 opts = _byteskwargs(opts)
3377 opts = _byteskwargs(opts)
3374 full = opts.get(b"full", False)
3378 full = opts.get(b"full", False)
3375 clear_revbranch = opts.get(b"clear_revbranch", False)
3379 clear_revbranch = opts.get(b"clear_revbranch", False)
3376 timer, fm = gettimer(ui, opts)
3380 timer, fm = gettimer(ui, opts)
3377
3381
3378 def getbranchmap(filtername):
3382 def getbranchmap(filtername):
3379 """generate a benchmark function for the filtername"""
3383 """generate a benchmark function for the filtername"""
3380 if filtername is None:
3384 if filtername is None:
3381 view = repo
3385 view = repo
3382 else:
3386 else:
3383 view = repo.filtered(filtername)
3387 view = repo.filtered(filtername)
3384 if util.safehasattr(view._branchcaches, '_per_filter'):
3388 if util.safehasattr(view._branchcaches, '_per_filter'):
3385 filtered = view._branchcaches._per_filter
3389 filtered = view._branchcaches._per_filter
3386 else:
3390 else:
3387 # older versions
3391 # older versions
3388 filtered = view._branchcaches
3392 filtered = view._branchcaches
3389
3393
3390 def d():
3394 def d():
3391 if clear_revbranch:
3395 if clear_revbranch:
3392 repo.revbranchcache()._clear()
3396 repo.revbranchcache()._clear()
3393 if full:
3397 if full:
3394 view._branchcaches.clear()
3398 view._branchcaches.clear()
3395 else:
3399 else:
3396 filtered.pop(filtername, None)
3400 filtered.pop(filtername, None)
3397 view.branchmap()
3401 view.branchmap()
3398
3402
3399 return d
3403 return d
3400
3404
3401 # add filter in smaller subset to bigger subset
3405 # add filter in smaller subset to bigger subset
3402 possiblefilters = set(repoview.filtertable)
3406 possiblefilters = set(repoview.filtertable)
3403 if filternames:
3407 if filternames:
3404 possiblefilters &= set(filternames)
3408 possiblefilters &= set(filternames)
3405 subsettable = getbranchmapsubsettable()
3409 subsettable = getbranchmapsubsettable()
3406 allfilters = []
3410 allfilters = []
3407 while possiblefilters:
3411 while possiblefilters:
3408 for name in possiblefilters:
3412 for name in possiblefilters:
3409 subset = subsettable.get(name)
3413 subset = subsettable.get(name)
3410 if subset not in possiblefilters:
3414 if subset not in possiblefilters:
3411 break
3415 break
3412 else:
3416 else:
3413 assert False, b'subset cycle %s!' % possiblefilters
3417 assert False, b'subset cycle %s!' % possiblefilters
3414 allfilters.append(name)
3418 allfilters.append(name)
3415 possiblefilters.remove(name)
3419 possiblefilters.remove(name)
3416
3420
3417 # warm the cache
3421 # warm the cache
3418 if not full:
3422 if not full:
3419 for name in allfilters:
3423 for name in allfilters:
3420 repo.filtered(name).branchmap()
3424 repo.filtered(name).branchmap()
3421 if not filternames or b'unfiltered' in filternames:
3425 if not filternames or b'unfiltered' in filternames:
3422 # add unfiltered
3426 # add unfiltered
3423 allfilters.append(None)
3427 allfilters.append(None)
3424
3428
3425 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3429 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3426 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3430 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3427 branchcacheread.set(classmethod(lambda *args: None))
3431 branchcacheread.set(classmethod(lambda *args: None))
3428 else:
3432 else:
3429 # older versions
3433 # older versions
3430 branchcacheread = safeattrsetter(branchmap, b'read')
3434 branchcacheread = safeattrsetter(branchmap, b'read')
3431 branchcacheread.set(lambda *args: None)
3435 branchcacheread.set(lambda *args: None)
3432 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3436 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3433 branchcachewrite.set(lambda *args: None)
3437 branchcachewrite.set(lambda *args: None)
3434 try:
3438 try:
3435 for name in allfilters:
3439 for name in allfilters:
3436 printname = name
3440 printname = name
3437 if name is None:
3441 if name is None:
3438 printname = b'unfiltered'
3442 printname = b'unfiltered'
3439 timer(getbranchmap(name), title=str(printname))
3443 timer(getbranchmap(name), title=str(printname))
3440 finally:
3444 finally:
3441 branchcacheread.restore()
3445 branchcacheread.restore()
3442 branchcachewrite.restore()
3446 branchcachewrite.restore()
3443 fm.end()
3447 fm.end()
3444
3448
3445
3449
@command(
    b'perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # revisions that only exist on the target side and whose addition we
    # will actually be timing
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # temporary repoview filters exposing exactly the base/target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found, build the base branchmap from
            # scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3554
3558
3555
3559
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list only prints which branchmap caches exist on disk (and
        # their size); nothing is benchmarked in that mode.
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the filter subset chain until we find a cached branchmap
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3614
3618
3615
3619
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # constructing the obsstore parses the on-disk markers; len()
        # forces the lazy parse to completion and yields the marker count
        return len(obsolete.obsstore(svfs))

    timer(loadmarkers)
    fm.end()
3625
3629
3626
3630
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark the util.lrucachedict implementation

    Measures cache construction, pure lookups, pure insertions and a
    random mix of both. When ``--costlimit`` is non-zero, the cost-aware
    variants (using per-item costs in [``--mincost``, ``--maxcost``]) are
    benchmarked instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object itself
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = [random.randint(0, _maxint) for _ in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = [random.choice(values) for _ in _xrange(gets)]

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # the cost limit may have evicted this key already
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3781
3785
3782
3786
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def dowrite():
        # hoist the bound-method lookup out of the hot loop so we time
        # the write path itself, not attribute access
        emit = ui.writenoi18n
        for _ in range(100000):
            emit(b'Testing write performance\n')

    timer(dowrite)
    fm.end()
3797
3801
3798
3802
def uisetup(ui):
    """Extension setup hook.

    On very old Mercurial versions, wrap cmdutil.openrevlog() so that the
    unsupported '--dir' option fails with a clear message.
    """
    # for "historical portability":
    # cmdutil.openrevlog() exists but commands.debugrevlogopts does not
    # only on Mercurial 1.9 (or a79fea6b3e77) - 3.7 (or 5606f7d0d063).
    # On those versions, '--dir' for openrevlog() should cause failure,
    # because it has only been available since 3.5 (or 49c583ca48c4).
    if not util.safehasattr(cmdutil, b'openrevlog'):
        return
    if util.safehasattr(commands, b'debugrevlogopts'):
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        # 'dirlog' appeared together with '--dir' support; its absence
        # means this Mercurial cannot honor the option
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3817
3821
3818
3822
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def dotimedprogress():
        # drive a full progress bar, one increment per step, from 0 to total
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(dotimedprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now