##// END OF EJS Templates
perf: introduce a `--contains` flag to the `perfdirstate` command...
marmoute -
r43468:0c4efb6e default
parent child Browse files
Show More
@@ -1,3789 +1,3821 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return *a* unchanged (fallback used when pycompat helpers are missing)."""
    return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
def safehasattr(thing, attr):
    """Portable ``hasattr`` replacement taking a bytes attribute name.

    util.safehasattr has only been available since 1.9.3 (94b200a11cf7),
    hence this local definition.
    """
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel
179
179
180
180
181 setattr(util, 'safehasattr', safehasattr)
181 setattr(util, 'safehasattr', safehasattr)
182
182
183 # for "historical portability":
183 # for "historical portability":
184 # define util.timer forcibly, because util.timer has been available
184 # define util.timer forcibly, because util.timer has been available
185 # since ae5d60bb70c9
185 # since ae5d60bb70c9
186 if safehasattr(time, 'perf_counter'):
186 if safehasattr(time, 'perf_counter'):
187 util.timer = time.perf_counter
187 util.timer = time.perf_counter
188 elif os.name == b'nt':
188 elif os.name == b'nt':
189 util.timer = time.clock
189 util.timer = time.clock
190 else:
190 else:
191 util.timer = time.time
191 util.timer = time.time
192
192
193 # for "historical portability":
193 # for "historical portability":
194 # use locally defined empty option list, if formatteropts isn't
194 # use locally defined empty option list, if formatteropts isn't
195 # available, because commands.formatteropts has been available since
195 # available, because commands.formatteropts has been available since
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 # available since 2.2 (or ae5f92e154d3)
197 # available since 2.2 (or ae5f92e154d3)
198 formatteropts = getattr(
198 formatteropts = getattr(
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 )
200 )
201
201
202 # for "historical portability":
202 # for "historical portability":
203 # use locally defined option list, if debugrevlogopts isn't available,
203 # use locally defined option list, if debugrevlogopts isn't available,
204 # because commands.debugrevlogopts has been available since 3.7 (or
204 # because commands.debugrevlogopts has been available since 3.7 (or
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 # since 1.9 (or a79fea6b3e77).
206 # since 1.9 (or a79fea6b3e77).
207 revlogopts = getattr(
207 revlogopts = getattr(
208 cmdutil,
208 cmdutil,
209 "debugrevlogopts",
209 "debugrevlogopts",
210 getattr(
210 getattr(
211 commands,
211 commands,
212 "debugrevlogopts",
212 "debugrevlogopts",
213 [
213 [
214 (b'c', b'changelog', False, b'open changelog'),
214 (b'c', b'changelog', False, b'open changelog'),
215 (b'm', b'manifest', False, b'open manifest'),
215 (b'm', b'manifest', False, b'open manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
217 ],
217 ],
218 ),
218 ),
219 )
219 )
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the list of names under which command *cmd* is registered.

    A declaration like ``b'perf|p'`` yields ``[b'perf', b'p']``.
    (cmdutil.parsealiases only exists since 1.5 / 6252852b4332.)
    """
    return cmd.split(b"|")
228
228
229
229
230 if safehasattr(registrar, 'command'):
230 if safehasattr(registrar, 'command'):
231 command = registrar.command(cmdtable)
231 command = registrar.command(cmdtable)
232 elif safehasattr(cmdutil, 'command'):
232 elif safehasattr(cmdutil, 'command'):
233 command = cmdutil.command(cmdtable)
233 command = cmdutil.command(cmdtable)
234 if b'norepo' not in getargspec(command).args:
234 if b'norepo' not in getargspec(command).args:
235 # for "historical portability":
235 # for "historical portability":
236 # wrap original cmdutil.command, because "norepo" option has
236 # wrap original cmdutil.command, because "norepo" option has
237 # been available since 3.1 (or 75a96326cecb)
237 # been available since 3.1 (or 75a96326cecb)
238 _command = command
238 _command = command
239
239
240 def command(name, options=(), synopsis=None, norepo=False):
240 def command(name, options=(), synopsis=None, norepo=False):
241 if norepo:
241 if norepo:
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
243 return _command(name, list(options), synopsis)
243 return _command(name, list(options), synopsis)
244
244
245
245
246 else:
246 else:
247 # for "historical portability":
247 # for "historical portability":
248 # define "@command" annotation locally, because cmdutil.command
248 # define "@command" annotation locally, because cmdutil.command
249 # has been available since 1.9 (or 2daa5179e73f)
249 # has been available since 1.9 (or 2daa5179e73f)
250 def command(name, options=(), synopsis=None, norepo=False):
250 def command(name, options=(), synopsis=None, norepo=False):
251 def decorator(func):
251 def decorator(func):
252 if synopsis:
252 if synopsis:
253 cmdtable[name] = func, list(options), synopsis
253 cmdtable[name] = func, list(options), synopsis
254 else:
254 else:
255 cmdtable[name] = func, list(options)
255 cmdtable[name] = func, list(options)
256 if norepo:
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return func
258 return func
259
259
260 return decorator
260 return decorator
261
261
262
262
263 try:
263 try:
264 import mercurial.registrar
264 import mercurial.registrar
265 import mercurial.configitems
265 import mercurial.configitems
266
266
267 configtable = {}
267 configtable = {}
268 configitem = mercurial.registrar.configitem(configtable)
268 configitem = mercurial.registrar.configitem(configtable)
269 configitem(
269 configitem(
270 b'perf',
270 b'perf',
271 b'presleep',
271 b'presleep',
272 default=mercurial.configitems.dynamicdefault,
272 default=mercurial.configitems.dynamicdefault,
273 experimental=True,
273 experimental=True,
274 )
274 )
275 configitem(
275 configitem(
276 b'perf',
276 b'perf',
277 b'stub',
277 b'stub',
278 default=mercurial.configitems.dynamicdefault,
278 default=mercurial.configitems.dynamicdefault,
279 experimental=True,
279 experimental=True,
280 )
280 )
281 configitem(
281 configitem(
282 b'perf',
282 b'perf',
283 b'parentscount',
283 b'parentscount',
284 default=mercurial.configitems.dynamicdefault,
284 default=mercurial.configitems.dynamicdefault,
285 experimental=True,
285 experimental=True,
286 )
286 )
287 configitem(
287 configitem(
288 b'perf',
288 b'perf',
289 b'all-timing',
289 b'all-timing',
290 default=mercurial.configitems.dynamicdefault,
290 default=mercurial.configitems.dynamicdefault,
291 experimental=True,
291 experimental=True,
292 )
292 )
293 configitem(
293 configitem(
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
295 )
295 )
296 configitem(
296 configitem(
297 b'perf',
297 b'perf',
298 b'profile-benchmark',
298 b'profile-benchmark',
299 default=mercurial.configitems.dynamicdefault,
299 default=mercurial.configitems.dynamicdefault,
300 )
300 )
301 configitem(
301 configitem(
302 b'perf',
302 b'perf',
303 b'run-limits',
303 b'run-limits',
304 default=mercurial.configitems.dynamicdefault,
304 default=mercurial.configitems.dynamicdefault,
305 experimental=True,
305 experimental=True,
306 )
306 )
307 except (ImportError, AttributeError):
307 except (ImportError, AttributeError):
308 pass
308 pass
309 except TypeError:
309 except TypeError:
310 # compatibility fix for a11fd395e83f
310 # compatibility fix for a11fd395e83f
311 # hg version: 5.2
311 # hg version: 5.2
312 configitem(
312 configitem(
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
314 )
314 )
315 configitem(
315 configitem(
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
317 )
317 )
318 configitem(
318 configitem(
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
320 )
320 )
321 configitem(
321 configitem(
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
323 )
323 )
324 configitem(
324 configitem(
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
326 )
326 )
327 configitem(
327 configitem(
328 b'perf',
328 b'perf',
329 b'profile-benchmark',
329 b'profile-benchmark',
330 default=mercurial.configitems.dynamicdefault,
330 default=mercurial.configitems.dynamicdefault,
331 )
331 )
332 configitem(
332 configitem(
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
334 )
334 )
335
335
336
336
def getlen(ui):
    """Return the length function benchmarks should use.

    Under the experimental ``perf.stub`` config every collection is
    pretended to have exactly one element so benchmarks finish quickly;
    otherwise the builtin ``len`` is returned.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    if stubbed:
        return lambda x: 1
    return len
341
341
342
342
class noop(object):
    """Context manager that does nothing on entry or exit.

    Used as a stand-in where a profiler context is optional.
    """

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None
351
351
352
352
353 NOOPCTX = noop()
353 NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        fout_setter = safeattrsetter(ui, b'fout', ignoremissing=True)
        if fout_setter:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            fout_setter.set(ui.ferr)

    # get a formatter
    mkformatter = getattr(ui, 'formatter', None)
    if mkformatter:
        fm = mkformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                self.hexfunc = node.hex if ui.debugflag else node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    def parselimit(spec):
        # one b'<time>-<numberofrun>' entry -> (float, int), or None on
        # malformed input (a warning is emitted and the entry dropped)
        pieces = spec.split(b'-', 1)
        if len(pieces) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % spec))
            return None
        try:
            time_limit = float(_sysstr(pieces[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), spec)
                )
            )
            return None
        try:
            run_limit = int(_sysstr(pieces[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), spec)
                )
            )
            return None
        return (time_limit, run_limit)

    # experimental config: perf.run-limits
    limits = []
    for spec in ui.configlist(b"perf", b"run-limits", []):
        parsed = parselimit(spec)
        if parsed is not None:
            limits.append(parsed)
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    timer = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return timer, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once, without timing it.

    Substituted for ``_timer`` when the ``perf.stub`` config is set so
    that benchmarks can be exercised quickly (e.g. by the test suite).
    ``fm`` and ``title`` are accepted only for signature compatibility.
    """
    if setup is not None:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Measure one run of the managed block.

    Yields a list which, on exit, receives a single
    ``(wallclock, user-cpu, system-cpu)`` tuple of elapsed seconds.
    """
    measurement = []
    proc_before = os.times()
    wall_before = util.timer()
    yield measurement
    wall_after = util.timer()
    proc_after = os.times()
    measurement.append(
        (
            wall_after - wall_before,
            proc_after[0] - proc_before[0],
            proc_after[1] - proc_before[1],
        )
    )
497
497
498
498
499 # list of stop condition (elapsed time, minimal run count)
499 # list of stop condition (elapsed time, minimal run count)
500 DEFAULTLIMITS = (
500 DEFAULTLIMITS = (
501 (3.0, 100),
501 (3.0, 100),
502 (10.0, 3),
502 (10.0, 3),
503 )
503 )
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func* and report the timings through formatter *fm*.

    ``setup`` (if given) runs before every invocation and is not timed.
    ``prerun`` unmeasured warm-up iterations are performed first.  Runs
    continue until one ``(elapsed-seconds, minimum-run-count)`` pair in
    ``limits`` is satisfied.  When ``profiler`` is given, only the first
    measured iteration runs under it.
    """
    gc.collect()
    samples = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up iterations: executed but never recorded
    for _ in range(prerun):
        if setup is not None:
            setup()
        func()
    while True:
        if setup is not None:
            setup()
        with profiler, timeone() as sample:
            lastresult = func()
        # only the very first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        samples.append(sample[0])
        # Look for a stop condition.
        elapsed = util.timer() - begin
        if any(t <= elapsed and mincount <= count for t, mincount in limits):
            break

    formatone(fm, samples, title=title, result=lastresult, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark's timings through formatter *fm*.

    ``timings`` is a list of ``(wall, user, sys)`` tuples; it is sorted
    in place.  By default only the best sample is written; with
    ``displayall`` the max, average and median are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # the b'best' row carries unprefixed field names for
        # compatibility with historical output
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        display(b'median', timings[len(timings) // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read an integer config value, falling back to *default* when unset.

    Defined locally for "historical portability": ui.configint has only
    been available since 1.9 (fa2b596db182).  Raises ConfigError when the
    configured value is not a valid integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    # small handle object exposing set()/restore() over the captured
    # (obj, name, origvalue) closure
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
def getbranchmapsubsettable():
    """Locate the `subsettable` mapping wherever this Mercurial defines it.

    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    store_vfs = getattr(repo, 'svfs', None)
    # fall back to the pre-2.3 attribute name when svfs is absent/falsy
    return store_vfs if store_vfs else getattr(repo, 'sopener')
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    repo_vfs = getattr(repo, 'vfs', None)
    # fall back to the pre-2.3 attribute name when vfs is absent/falsy
    return repo_vfs if repo_vfs else getattr(repo, 'opener')
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    # older API: repo._tags holds the cache directly
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    # oldest supported API: repo.tagscache
    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
def clearfilecache(obj, attrname):
    """Drop a filecache'd attribute so the next access recomputes it."""
    # when obj is a repoview, the cache lives on the unfiltered object
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # also forget the filecache bookkeeping entry (missing key is fine)
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    # Drop any cached changelog so the next access reloads it from disk.
    if repo is not repo.unfiltered():
        # repoview keeps its own filtered-changelog cache; reset it too
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # time a full dirstate walk over the working copy for the given patterns
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})

    def walkonce():
        entries = repo.dirstate.walk(
            m, subrepos=[], unknown=True, ignored=False
        )
        return len(list(entries))

    timer(walkonce)
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # time annotating file `f` at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def annotateonce():
        return len(fctx.annotate(True))

    timer(annotateonce)
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    askunknown = opts[b'unknown']

    def statusonce():
        # total number of entries across all status categories
        return sum(len(component) for component in repo.status(unknown=askunknown))

    timer(statusonce)
    fm.end()
781
781
782
782
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # time a dry-run addremove over the whole working copy
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True  # silence per-file output during the runs
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # newer addremove signature takes an explicit ui path function,
            # detected here via signature introspection
            uipathfn = scmutil.getuipathfn(repo)

            def bench():
                scmutil.addremove(repo, matcher, b"", uipathfn, opts)

        else:

            def bench():
                scmutil.addremove(repo, matcher, b"", opts)

        timer(bench)
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
800
800
801
801
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlog API: reset the node->rev lookup cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
811
811
812
812
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def reset():
        # drop revlog caches so each run computes heads from scratch
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=reset)
    fm.end()
828
828
829
829
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    # time computing the full tag mapping of the repository
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def reset():
        # optionally drop changelog/manifest so their parsing is timed too
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=reset)
    fm.end()
852
852
853
853
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # time exhausting the ancestor iterator of all changelog heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
866
866
867
867
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # time membership tests of `revset` against the ancestors of all heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        # a fresh lazy ancestor set each run, so its cache is not reused
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(run)
    fm.end()
882
882
883
883
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # Unlike the sibling perf commands, this one previously skipped the
    # str->bytes kwargs conversion, handing str-keyed opts to gettimer()
    # and hg.peer(); convert for consistency with the rest of the file.
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # (re)connect to the peer before each timed run
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
900
900
901
901
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def reset():
        # optionally drop the changelog so its reload is part of the timing
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def run():
        # property access triggers the bookmark file parse
        repo._bookmarks

    timer(run, setup=reset)
    fm.end()
924
924
925
925
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time `fn` against a freshly opened and parsed bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time draining the whole bundle in `size`-byte reads
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # time reading every part payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines are always measured
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once to sniff the bundle type and pick the matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1050
1050
1051
1051
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    # resolve the revset (default: everything) to changelog nodes
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # generate and exhaust the changelog chunk stream
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1087
1087
1088
1088
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # time rebuilding the dirstate directory cache via hasdir()
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b'a' in dirstate

    def run():
        dirstate.hasdir(b'a')
        # drop the directory cache so the next run rebuilds it
        del dirstate._map._dirs

    timer(run)
    fm.end()
1102
1102
1103
1103
@command(
    b'perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    # note: docstring typos fixed ("benchmap"/"distate"/"were")
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before timing starts
    b"a" in repo.dirstate

    # the two alternative modes are mutually exclusive
    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default mode: reload the dirstate from scratch on every run

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1134
1166
1135
1167
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate (load from disk) outside of the timed section
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the directory cache so each run rebuilds it from scratch
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1152
1184
1153
1185
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate and the fold map before timing starts
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached fold map so each run rebuilds it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1173
1205
1174
1206
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate and the directory fold map before timing starts
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the fold map and the underlying dirs cache
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1195
1227
1196
1228
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """Time writing the dirstate out to disk."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b"a" in ds

    def mark_dirty():
        # a clean dirstate skips the write entirely, so flag it dirty
        ds._dirty = True

    def run():
        ds.write(repo.currenttransaction())

    timer(run, setup=mark_dirty)
    fm.end()
1214
1246
1215
1247
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base` keys
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1237
1269
1238
1270
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """Benchmark `merge.calculateupdates` between two revisions."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle
        # of our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1270
1302
1271
1303
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle
        # of our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1294
1326
1295
1327
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """Benchmark the copy-tracing logic between two revisions."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(ctx1, ctx2)

    timer(run)
    fm.end()
1309
1341
1310
1342
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # with --full, also account for reading the phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1333
1365
1334
1366
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # local imports: these modules are only needed by this command
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # resolve the destination path (falls back to the push/default paths)
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, outside the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # count non-public remote roots that are known locally, for context
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        # the actual benchmarked operation: summarizing remote phases
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1391
1423
1392
1424
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` names a changeset: derive the manifest node from it
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node of a manifest revision
            t = bin(rev)
        else:
            try:
                # NOTE: rebinds `rev` from bytes to int for the lookup below
                rev = int(rev)

                # `getstorage` only exists on newer Mercurial versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # clear caches each run so we always measure a cold read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1436
1468
1437
1469
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """Benchmark reading a single changeset entry from the changelog."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1450
1482
1451
1483
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def drop_ignore():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load_ignore():
        # reading the property (re)builds the ignore matcher
        dirstate._ignore

    timer(load_ignore, setup=drop_ignore, title=b"load")
    fm.end()
1468
1500
1469
1501
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # FIX: after _byteskwargs() all keys are bytes; the str key
        # opts['rev'] raised KeyError on Python 3. The Abort message is
        # also made bytes for consistency with the rest of the file.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1531
1563
1532
1564
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # FIX: after _byteskwargs() all keys are bytes; the str key
    # opts['clear_caches'] raised KeyError on Python 3. The Abort message
    # is also made bytes for consistency with the rest of the file.
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1599
1631
1600
1632
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """Benchmark the startup cost of a bare `hg version -q` invocation."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        if os.name != r'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # Windows has no `VAR= cmd` syntax, so set the variable instead
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
1617
1649
1618
1650
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object
    layers from the repository object. The first N revisions will be used
    for this benchmark. N is controlled by the ``perf.parentscount`` config
    option (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1644
1676
1645
1677
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """Benchmark computing the list of files touched by a changeset."""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[x].files())

    timer(run)
    fm.end()
1657
1689
1658
1690
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """Benchmark reading the raw files list straight from the changelog."""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # field [3] of a changelog entry is the list of touched files
        len(cl.read(x)[3])

    timer(run)
    fm.end()
1671
1703
1672
1704
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """Benchmark resolving a revision identifier to a node."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1679
1711
1680
1712
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """Benchmark replaying a pseudo-random sequence of linelog edits."""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation replays the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
1718
1750
1719
1751
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """Benchmark resolving a set of revision specs."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1727
1759
1728
1760
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """Benchmark a cold node-to-rev lookup in a freshly opened changelog."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def run():
        cl.rev(n)
        # drop the caches so the next run starts cold again
        clearcaches(cl)

    timer(run)
    fm.end()
1745
1777
1746
1778
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """Benchmark a plain `hg log` run (output is discarded)."""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the log output so printing does not pollute the measurement
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
1764
1796
1765
1797
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1782
1814
1783
1815
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render through a throw-away ui writing to os.devnull so terminal output
    # does not affect the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    try:
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # the devnull sink used to be leaked; close it explicitly
        nullui.fout.close()
1823
1855
1824
1856
def _displaystats(ui, opts, entries, data):
    """render percentile statistics for measurements collected in `data`

    `entries` is a list of `(key, title)` pairs; `data` maps each key to a
    list of tuples whose first element is the measured value (remaining
    elements identify the measurement and are ignored here).
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # nothing was collected for this entry; indexing would crash
        if not values:
            continue
        # percentile indices must be computed over the number of collected
        # values, not the number of keys in `data` (previous code used
        # `len(data)` which skewed every percentile)
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1870
1902
1871
1903
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column header, per-line format) pairs; the timing/rename columns are
    # stripped below when --timing is not requested
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators handed to _displaystats; the timing series only exist
        # when --timing is set
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge revisions exercise merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # BUGFIX: the end timestamp must be taken before computing
                # the duration; the previous code read the stale `end` from
                # the p1 measurement, making p2.time meaningless.
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2053
2085
2054
2086
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the rename/time columns only appear with --timing, so header and
    # per-line format differ between the two modes
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators consumed by _displaystats at the end; the timing
        # series only exist when --timing is set
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions are of interest: each (ancestor, parent) pair of a
    # merge is a realistic copy-tracing workload
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                # pairs with nothing to trace are not worth reporting
                if not missing:
                    continue
                # NOTE(review): keys are bytes here but str ('time', etc.)
                # below — these compare equal on py2 only; verify py3 path.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2180
2212
2181
2213
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # benchmark the construction of a case-collision auditor
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build)
    fm.end()
2188
2220
2189
2221
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # benchmark loading the fncache file from the store
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2201
2233
2202
2234
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # benchmark rewriting the fncache file inside a transaction
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')

            def d():
                # mark dirty each run, otherwise write() is a no-op
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        finally:
            # abort the transaction if timing raised before tr.close()
            tr.release()
    finally:
        # the lock used to leak when timer(d) raised
        lock.release()
    fm.end()
2221
2253
2222
2254
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # benchmark encoding every path currently listed in the fncache
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2236
2268
2237
2269
def _bdiffworker(q, blocks, xdiff, ready, done):
    # worker thread for perfbdiff: drain (text1, text2) pairs from `q`,
    # diff each one, then park on `ready` until the next batch (or `done`)
    if xdiff:
        diff = mdiff.bdiff.xdiffblocks
    elif blocks:
        diff = mdiff.bdiff.blocks
    else:
        diff = mdiff.textdiff
    while not done.is_set():
        pair = q.get()
        # a None item is the end-of-batch sentinel
        while pair is not None:
            diff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # acknowledge the sentinel itself
        with ready:
            ready.wait()
2253
2285
2254
2286
def _manifestrevision(repo, mnode):
    # return the raw manifest text for `mnode`, supporting both the modern
    # manifestlog API (getstorage) and the legacy one (_revlog)
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2264
2296
2265
2297
2266 @command(
2298 @command(
2267 b'perfbdiff',
2299 b'perfbdiff',
2268 revlogopts
2300 revlogopts
2269 + formatteropts
2301 + formatteropts
2270 + [
2302 + [
2271 (
2303 (
2272 b'',
2304 b'',
2273 b'count',
2305 b'count',
2274 1,
2306 1,
2275 b'number of revisions to test (when using --startrev)',
2307 b'number of revisions to test (when using --startrev)',
2276 ),
2308 ),
2277 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2309 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2278 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2310 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2279 (b'', b'blocks', False, b'test computing diffs into blocks'),
2311 (b'', b'blocks', False, b'test computing diffs into blocks'),
2280 (b'', b'xdiff', False, b'use xdiff algorithm'),
2312 (b'', b'xdiff', False, b'use xdiff algorithm'),
2281 ],
2313 ],
2282 b'-c|-m|FILE REV',
2314 b'-c|-m|FILE REV',
2283 )
2315 )
2284 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2316 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2285 """benchmark a bdiff between revisions
2317 """benchmark a bdiff between revisions
2286
2318
2287 By default, benchmark a bdiff between its delta parent and itself.
2319 By default, benchmark a bdiff between its delta parent and itself.
2288
2320
2289 With ``--count``, benchmark bdiffs between delta parents and self for N
2321 With ``--count``, benchmark bdiffs between delta parents and self for N
2290 revisions starting at the specified revision.
2322 revisions starting at the specified revision.
2291
2323
2292 With ``--alldata``, assume the requested revision is a changeset and
2324 With ``--alldata``, assume the requested revision is a changeset and
2293 measure bdiffs for all changes related to that changeset (manifest
2325 measure bdiffs for all changes related to that changeset (manifest
2294 and filelogs).
2326 and filelogs).
2295 """
2327 """
2296 opts = _byteskwargs(opts)
2328 opts = _byteskwargs(opts)
2297
2329
2298 if opts[b'xdiff'] and not opts[b'blocks']:
2330 if opts[b'xdiff'] and not opts[b'blocks']:
2299 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2331 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2300
2332
2301 if opts[b'alldata']:
2333 if opts[b'alldata']:
2302 opts[b'changelog'] = True
2334 opts[b'changelog'] = True
2303
2335
2304 if opts.get(b'changelog') or opts.get(b'manifest'):
2336 if opts.get(b'changelog') or opts.get(b'manifest'):
2305 file_, rev = None, file_
2337 file_, rev = None, file_
2306 elif rev is None:
2338 elif rev is None:
2307 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2339 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2308
2340
2309 blocks = opts[b'blocks']
2341 blocks = opts[b'blocks']
2310 xdiff = opts[b'xdiff']
2342 xdiff = opts[b'xdiff']
2311 textpairs = []
2343 textpairs = []
2312
2344
2313 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2345 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2314
2346
2315 startrev = r.rev(r.lookup(rev))
2347 startrev = r.rev(r.lookup(rev))
2316 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2348 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2317 if opts[b'alldata']:
2349 if opts[b'alldata']:
2318 # Load revisions associated with changeset.
2350 # Load revisions associated with changeset.
2319 ctx = repo[rev]
2351 ctx = repo[rev]
2320 mtext = _manifestrevision(repo, ctx.manifestnode())
2352 mtext = _manifestrevision(repo, ctx.manifestnode())
2321 for pctx in ctx.parents():
2353 for pctx in ctx.parents():
2322 pman = _manifestrevision(repo, pctx.manifestnode())
2354 pman = _manifestrevision(repo, pctx.manifestnode())
2323 textpairs.append((pman, mtext))
2355 textpairs.append((pman, mtext))
2324
2356
2325 # Load filelog revisions by iterating manifest delta.
2357 # Load filelog revisions by iterating manifest delta.
2326 man = ctx.manifest()
2358 man = ctx.manifest()
2327 pman = ctx.p1().manifest()
2359 pman = ctx.p1().manifest()
2328 for filename, change in pman.diff(man).items():
2360 for filename, change in pman.diff(man).items():
2329 fctx = repo.file(filename)
2361 fctx = repo.file(filename)
2330 f1 = fctx.revision(change[0][0] or -1)
2362 f1 = fctx.revision(change[0][0] or -1)
2331 f2 = fctx.revision(change[1][0] or -1)
2363 f2 = fctx.revision(change[1][0] or -1)
2332 textpairs.append((f1, f2))
2364 textpairs.append((f1, f2))
2333 else:
2365 else:
2334 dp = r.deltaparent(rev)
2366 dp = r.deltaparent(rev)
2335 textpairs.append((r.revision(dp), r.revision(rev)))
2367 textpairs.append((r.revision(dp), r.revision(rev)))
2336
2368
2337 withthreads = threads > 0
2369 withthreads = threads > 0
2338 if not withthreads:
2370 if not withthreads:
2339
2371
2340 def d():
2372 def d():
2341 for pair in textpairs:
2373 for pair in textpairs:
2342 if xdiff:
2374 if xdiff:
2343 mdiff.bdiff.xdiffblocks(*pair)
2375 mdiff.bdiff.xdiffblocks(*pair)
2344 elif blocks:
2376 elif blocks:
2345 mdiff.bdiff.blocks(*pair)
2377 mdiff.bdiff.blocks(*pair)
2346 else:
2378 else:
2347 mdiff.textdiff(*pair)
2379 mdiff.textdiff(*pair)
2348
2380
2349 else:
2381 else:
2350 q = queue()
2382 q = queue()
2351 for i in _xrange(threads):
2383 for i in _xrange(threads):
2352 q.put(None)
2384 q.put(None)
2353 ready = threading.Condition()
2385 ready = threading.Condition()
2354 done = threading.Event()
2386 done = threading.Event()
2355 for i in _xrange(threads):
2387 for i in _xrange(threads):
2356 threading.Thread(
2388 threading.Thread(
2357 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2389 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2358 ).start()
2390 ).start()
2359 q.join()
2391 q.join()
2360
2392
2361 def d():
2393 def d():
2362 for pair in textpairs:
2394 for pair in textpairs:
2363 q.put(pair)
2395 q.put(pair)
2364 for i in _xrange(threads):
2396 for i in _xrange(threads):
2365 q.put(None)
2397 q.put(None)
2366 with ready:
2398 with ready:
2367 ready.notify_all()
2399 ready.notify_all()
2368 q.join()
2400 q.join()
2369
2401
2370 timer, fm = gettimer(ui, opts)
2402 timer, fm = gettimer(ui, opts)
2371 timer(d)
2403 timer(d)
2372 fm.end()
2404 fm.end()
2373
2405
2374 if withthreads:
2406 if withthreads:
2375 done.set()
2407 done.set()
2376 for i in _xrange(threads):
2408 for i in _xrange(threads):
2377 q.put(None)
2409 q.put(None)
2378 with ready:
2410 with ready:
2379 ready.notify_all()
2411 ready.notify_all()
2380
2412
2381
2413
2382 @command(
2414 @command(
2383 b'perfunidiff',
2415 b'perfunidiff',
2384 revlogopts
2416 revlogopts
2385 + formatteropts
2417 + formatteropts
2386 + [
2418 + [
2387 (
2419 (
2388 b'',
2420 b'',
2389 b'count',
2421 b'count',
2390 1,
2422 1,
2391 b'number of revisions to test (when using --startrev)',
2423 b'number of revisions to test (when using --startrev)',
2392 ),
2424 ),
2393 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2425 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2394 ],
2426 ],
2395 b'-c|-m|FILE REV',
2427 b'-c|-m|FILE REV',
2396 )
2428 )
2397 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2429 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2398 """benchmark a unified diff between revisions
2430 """benchmark a unified diff between revisions
2399
2431
2400 This doesn't include any copy tracing - it's just a unified diff
2432 This doesn't include any copy tracing - it's just a unified diff
2401 of the texts.
2433 of the texts.
2402
2434
2403 By default, benchmark a diff between its delta parent and itself.
2435 By default, benchmark a diff between its delta parent and itself.
2404
2436
2405 With ``--count``, benchmark diffs between delta parents and self for N
2437 With ``--count``, benchmark diffs between delta parents and self for N
2406 revisions starting at the specified revision.
2438 revisions starting at the specified revision.
2407
2439
2408 With ``--alldata``, assume the requested revision is a changeset and
2440 With ``--alldata``, assume the requested revision is a changeset and
2409 measure diffs for all changes related to that changeset (manifest
2441 measure diffs for all changes related to that changeset (manifest
2410 and filelogs).
2442 and filelogs).
2411 """
2443 """
2412 opts = _byteskwargs(opts)
2444 opts = _byteskwargs(opts)
2413 if opts[b'alldata']:
2445 if opts[b'alldata']:
2414 opts[b'changelog'] = True
2446 opts[b'changelog'] = True
2415
2447
2416 if opts.get(b'changelog') or opts.get(b'manifest'):
2448 if opts.get(b'changelog') or opts.get(b'manifest'):
2417 file_, rev = None, file_
2449 file_, rev = None, file_
2418 elif rev is None:
2450 elif rev is None:
2419 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2451 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2420
2452
2421 textpairs = []
2453 textpairs = []
2422
2454
2423 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2455 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2424
2456
2425 startrev = r.rev(r.lookup(rev))
2457 startrev = r.rev(r.lookup(rev))
2426 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2458 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2427 if opts[b'alldata']:
2459 if opts[b'alldata']:
2428 # Load revisions associated with changeset.
2460 # Load revisions associated with changeset.
2429 ctx = repo[rev]
2461 ctx = repo[rev]
2430 mtext = _manifestrevision(repo, ctx.manifestnode())
2462 mtext = _manifestrevision(repo, ctx.manifestnode())
2431 for pctx in ctx.parents():
2463 for pctx in ctx.parents():
2432 pman = _manifestrevision(repo, pctx.manifestnode())
2464 pman = _manifestrevision(repo, pctx.manifestnode())
2433 textpairs.append((pman, mtext))
2465 textpairs.append((pman, mtext))
2434
2466
2435 # Load filelog revisions by iterating manifest delta.
2467 # Load filelog revisions by iterating manifest delta.
2436 man = ctx.manifest()
2468 man = ctx.manifest()
2437 pman = ctx.p1().manifest()
2469 pman = ctx.p1().manifest()
2438 for filename, change in pman.diff(man).items():
2470 for filename, change in pman.diff(man).items():
2439 fctx = repo.file(filename)
2471 fctx = repo.file(filename)
2440 f1 = fctx.revision(change[0][0] or -1)
2472 f1 = fctx.revision(change[0][0] or -1)
2441 f2 = fctx.revision(change[1][0] or -1)
2473 f2 = fctx.revision(change[1][0] or -1)
2442 textpairs.append((f1, f2))
2474 textpairs.append((f1, f2))
2443 else:
2475 else:
2444 dp = r.deltaparent(rev)
2476 dp = r.deltaparent(rev)
2445 textpairs.append((r.revision(dp), r.revision(rev)))
2477 textpairs.append((r.revision(dp), r.revision(rev)))
2446
2478
2447 def d():
2479 def d():
2448 for left, right in textpairs:
2480 for left, right in textpairs:
2449 # The date strings don't matter, so we pass empty strings.
2481 # The date strings don't matter, so we pass empty strings.
2450 headerlines, hunks = mdiff.unidiff(
2482 headerlines, hunks = mdiff.unidiff(
2451 left, b'', right, b'', b'left', b'right', binary=False
2483 left, b'', right, b'', b'left', b'right', binary=False
2452 )
2484 )
2453 # consume iterators in roughly the way patch.py does
2485 # consume iterators in roughly the way patch.py does
2454 b'\n'.join(headerlines)
2486 b'\n'.join(headerlines)
2455 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2487 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2456
2488
2457 timer, fm = gettimer(ui, opts)
2489 timer, fm = gettimer(ui, opts)
2458 timer(d)
2490 timer(d)
2459 fm.end()
2491 fm.end()
2460
2492
2461
2493
2462 @command(b'perfdiffwd', formatteropts)
2494 @command(b'perfdiffwd', formatteropts)
2463 def perfdiffwd(ui, repo, **opts):
2495 def perfdiffwd(ui, repo, **opts):
2464 """Profile diff of working directory changes"""
2496 """Profile diff of working directory changes"""
2465 opts = _byteskwargs(opts)
2497 opts = _byteskwargs(opts)
2466 timer, fm = gettimer(ui, opts)
2498 timer, fm = gettimer(ui, opts)
2467 options = {
2499 options = {
2468 'w': 'ignore_all_space',
2500 'w': 'ignore_all_space',
2469 'b': 'ignore_space_change',
2501 'b': 'ignore_space_change',
2470 'B': 'ignore_blank_lines',
2502 'B': 'ignore_blank_lines',
2471 }
2503 }
2472
2504
2473 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2505 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2474 opts = dict((options[c], b'1') for c in diffopt)
2506 opts = dict((options[c], b'1') for c in diffopt)
2475
2507
2476 def d():
2508 def d():
2477 ui.pushbuffer()
2509 ui.pushbuffer()
2478 commands.diff(ui, repo, **opts)
2510 commands.diff(ui, repo, **opts)
2479 ui.popbuffer()
2511 ui.popbuffer()
2480
2512
2481 diffopt = diffopt.encode('ascii')
2513 diffopt = diffopt.encode('ascii')
2482 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2514 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2483 timer(d, title=title)
2515 timer(d, title=title)
2484 fm.end()
2516 fm.end()
2485
2517
2486
2518
2487 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2519 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2488 def perfrevlogindex(ui, repo, file_=None, **opts):
2520 def perfrevlogindex(ui, repo, file_=None, **opts):
2489 """Benchmark operations against a revlog index.
2521 """Benchmark operations against a revlog index.
2490
2522
2491 This tests constructing a revlog instance, reading index data,
2523 This tests constructing a revlog instance, reading index data,
2492 parsing index data, and performing various operations related to
2524 parsing index data, and performing various operations related to
2493 index data.
2525 index data.
2494 """
2526 """
2495
2527
2496 opts = _byteskwargs(opts)
2528 opts = _byteskwargs(opts)
2497
2529
2498 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2530 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2499
2531
2500 opener = getattr(rl, 'opener') # trick linter
2532 opener = getattr(rl, 'opener') # trick linter
2501 indexfile = rl.indexfile
2533 indexfile = rl.indexfile
2502 data = opener.read(indexfile)
2534 data = opener.read(indexfile)
2503
2535
2504 header = struct.unpack(b'>I', data[0:4])[0]
2536 header = struct.unpack(b'>I', data[0:4])[0]
2505 version = header & 0xFFFF
2537 version = header & 0xFFFF
2506 if version == 1:
2538 if version == 1:
2507 revlogio = revlog.revlogio()
2539 revlogio = revlog.revlogio()
2508 inline = header & (1 << 16)
2540 inline = header & (1 << 16)
2509 else:
2541 else:
2510 raise error.Abort(b'unsupported revlog version: %d' % version)
2542 raise error.Abort(b'unsupported revlog version: %d' % version)
2511
2543
2512 rllen = len(rl)
2544 rllen = len(rl)
2513
2545
2514 node0 = rl.node(0)
2546 node0 = rl.node(0)
2515 node25 = rl.node(rllen // 4)
2547 node25 = rl.node(rllen // 4)
2516 node50 = rl.node(rllen // 2)
2548 node50 = rl.node(rllen // 2)
2517 node75 = rl.node(rllen // 4 * 3)
2549 node75 = rl.node(rllen // 4 * 3)
2518 node100 = rl.node(rllen - 1)
2550 node100 = rl.node(rllen - 1)
2519
2551
2520 allrevs = range(rllen)
2552 allrevs = range(rllen)
2521 allrevsrev = list(reversed(allrevs))
2553 allrevsrev = list(reversed(allrevs))
2522 allnodes = [rl.node(rev) for rev in range(rllen)]
2554 allnodes = [rl.node(rev) for rev in range(rllen)]
2523 allnodesrev = list(reversed(allnodes))
2555 allnodesrev = list(reversed(allnodes))
2524
2556
2525 def constructor():
2557 def constructor():
2526 revlog.revlog(opener, indexfile)
2558 revlog.revlog(opener, indexfile)
2527
2559
2528 def read():
2560 def read():
2529 with opener(indexfile) as fh:
2561 with opener(indexfile) as fh:
2530 fh.read()
2562 fh.read()
2531
2563
2532 def parseindex():
2564 def parseindex():
2533 revlogio.parseindex(data, inline)
2565 revlogio.parseindex(data, inline)
2534
2566
2535 def getentry(revornode):
2567 def getentry(revornode):
2536 index = revlogio.parseindex(data, inline)[0]
2568 index = revlogio.parseindex(data, inline)[0]
2537 index[revornode]
2569 index[revornode]
2538
2570
2539 def getentries(revs, count=1):
2571 def getentries(revs, count=1):
2540 index = revlogio.parseindex(data, inline)[0]
2572 index = revlogio.parseindex(data, inline)[0]
2541
2573
2542 for i in range(count):
2574 for i in range(count):
2543 for rev in revs:
2575 for rev in revs:
2544 index[rev]
2576 index[rev]
2545
2577
2546 def resolvenode(node):
2578 def resolvenode(node):
2547 nodemap = revlogio.parseindex(data, inline)[1]
2579 nodemap = revlogio.parseindex(data, inline)[1]
2548 # This only works for the C code.
2580 # This only works for the C code.
2549 if nodemap is None:
2581 if nodemap is None:
2550 return
2582 return
2551
2583
2552 try:
2584 try:
2553 nodemap[node]
2585 nodemap[node]
2554 except error.RevlogError:
2586 except error.RevlogError:
2555 pass
2587 pass
2556
2588
2557 def resolvenodes(nodes, count=1):
2589 def resolvenodes(nodes, count=1):
2558 nodemap = revlogio.parseindex(data, inline)[1]
2590 nodemap = revlogio.parseindex(data, inline)[1]
2559 if nodemap is None:
2591 if nodemap is None:
2560 return
2592 return
2561
2593
2562 for i in range(count):
2594 for i in range(count):
2563 for node in nodes:
2595 for node in nodes:
2564 try:
2596 try:
2565 nodemap[node]
2597 nodemap[node]
2566 except error.RevlogError:
2598 except error.RevlogError:
2567 pass
2599 pass
2568
2600
2569 benches = [
2601 benches = [
2570 (constructor, b'revlog constructor'),
2602 (constructor, b'revlog constructor'),
2571 (read, b'read'),
2603 (read, b'read'),
2572 (parseindex, b'create index object'),
2604 (parseindex, b'create index object'),
2573 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2605 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2574 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2606 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2575 (lambda: resolvenode(node0), b'look up node at rev 0'),
2607 (lambda: resolvenode(node0), b'look up node at rev 0'),
2576 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2608 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2577 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2609 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2578 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2610 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2579 (lambda: resolvenode(node100), b'look up node at tip'),
2611 (lambda: resolvenode(node100), b'look up node at tip'),
2580 # 2x variation is to measure caching impact.
2612 # 2x variation is to measure caching impact.
2581 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2613 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2582 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2614 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2583 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2615 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2584 (
2616 (
2585 lambda: resolvenodes(allnodesrev, 2),
2617 lambda: resolvenodes(allnodesrev, 2),
2586 b'look up all nodes 2x (reverse)',
2618 b'look up all nodes 2x (reverse)',
2587 ),
2619 ),
2588 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2620 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2589 (
2621 (
2590 lambda: getentries(allrevs, 2),
2622 lambda: getentries(allrevs, 2),
2591 b'retrieve all index entries 2x (forward)',
2623 b'retrieve all index entries 2x (forward)',
2592 ),
2624 ),
2593 (
2625 (
2594 lambda: getentries(allrevsrev),
2626 lambda: getentries(allrevsrev),
2595 b'retrieve all index entries (reverse)',
2627 b'retrieve all index entries (reverse)',
2596 ),
2628 ),
2597 (
2629 (
2598 lambda: getentries(allrevsrev, 2),
2630 lambda: getentries(allrevsrev, 2),
2599 b'retrieve all index entries 2x (reverse)',
2631 b'retrieve all index entries 2x (reverse)',
2600 ),
2632 ),
2601 ]
2633 ]
2602
2634
2603 for fn, title in benches:
2635 for fn, title in benches:
2604 timer, fm = gettimer(ui, opts)
2636 timer, fm = gettimer(ui, opts)
2605 timer(fn, title=title)
2637 timer(fn, title=title)
2606 fm.end()
2638 fm.end()
2607
2639
2608
2640
2609 @command(
2641 @command(
2610 b'perfrevlogrevisions',
2642 b'perfrevlogrevisions',
2611 revlogopts
2643 revlogopts
2612 + formatteropts
2644 + formatteropts
2613 + [
2645 + [
2614 (b'd', b'dist', 100, b'distance between the revisions'),
2646 (b'd', b'dist', 100, b'distance between the revisions'),
2615 (b's', b'startrev', 0, b'revision to start reading at'),
2647 (b's', b'startrev', 0, b'revision to start reading at'),
2616 (b'', b'reverse', False, b'read in reverse'),
2648 (b'', b'reverse', False, b'read in reverse'),
2617 ],
2649 ],
2618 b'-c|-m|FILE',
2650 b'-c|-m|FILE',
2619 )
2651 )
2620 def perfrevlogrevisions(
2652 def perfrevlogrevisions(
2621 ui, repo, file_=None, startrev=0, reverse=False, **opts
2653 ui, repo, file_=None, startrev=0, reverse=False, **opts
2622 ):
2654 ):
2623 """Benchmark reading a series of revisions from a revlog.
2655 """Benchmark reading a series of revisions from a revlog.
2624
2656
2625 By default, we read every ``-d/--dist`` revision from 0 to tip of
2657 By default, we read every ``-d/--dist`` revision from 0 to tip of
2626 the specified revlog.
2658 the specified revlog.
2627
2659
2628 The start revision can be defined via ``-s/--startrev``.
2660 The start revision can be defined via ``-s/--startrev``.
2629 """
2661 """
2630 opts = _byteskwargs(opts)
2662 opts = _byteskwargs(opts)
2631
2663
2632 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2664 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2633 rllen = getlen(ui)(rl)
2665 rllen = getlen(ui)(rl)
2634
2666
2635 if startrev < 0:
2667 if startrev < 0:
2636 startrev = rllen + startrev
2668 startrev = rllen + startrev
2637
2669
2638 def d():
2670 def d():
2639 rl.clearcaches()
2671 rl.clearcaches()
2640
2672
2641 beginrev = startrev
2673 beginrev = startrev
2642 endrev = rllen
2674 endrev = rllen
2643 dist = opts[b'dist']
2675 dist = opts[b'dist']
2644
2676
2645 if reverse:
2677 if reverse:
2646 beginrev, endrev = endrev - 1, beginrev - 1
2678 beginrev, endrev = endrev - 1, beginrev - 1
2647 dist = -1 * dist
2679 dist = -1 * dist
2648
2680
2649 for x in _xrange(beginrev, endrev, dist):
2681 for x in _xrange(beginrev, endrev, dist):
2650 # Old revisions don't support passing int.
2682 # Old revisions don't support passing int.
2651 n = rl.node(x)
2683 n = rl.node(x)
2652 rl.revision(n)
2684 rl.revision(n)
2653
2685
2654 timer, fm = gettimer(ui, opts)
2686 timer, fm = gettimer(ui, opts)
2655 timer(d)
2687 timer(d)
2656 fm.end()
2688 fm.end()
2657
2689
2658
2690
2659 @command(
2691 @command(
2660 b'perfrevlogwrite',
2692 b'perfrevlogwrite',
2661 revlogopts
2693 revlogopts
2662 + formatteropts
2694 + formatteropts
2663 + [
2695 + [
2664 (b's', b'startrev', 1000, b'revision to start writing at'),
2696 (b's', b'startrev', 1000, b'revision to start writing at'),
2665 (b'', b'stoprev', -1, b'last revision to write'),
2697 (b'', b'stoprev', -1, b'last revision to write'),
2666 (b'', b'count', 3, b'number of passes to perform'),
2698 (b'', b'count', 3, b'number of passes to perform'),
2667 (b'', b'details', False, b'print timing for every revisions tested'),
2699 (b'', b'details', False, b'print timing for every revisions tested'),
2668 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2700 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2669 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2701 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2670 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2702 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2671 ],
2703 ],
2672 b'-c|-m|FILE',
2704 b'-c|-m|FILE',
2673 )
2705 )
2674 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2706 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2675 """Benchmark writing a series of revisions to a revlog.
2707 """Benchmark writing a series of revisions to a revlog.
2676
2708
2677 Possible source values are:
2709 Possible source values are:
2678 * `full`: add from a full text (default).
2710 * `full`: add from a full text (default).
2679 * `parent-1`: add from a delta to the first parent
2711 * `parent-1`: add from a delta to the first parent
2680 * `parent-2`: add from a delta to the second parent if it exists
2712 * `parent-2`: add from a delta to the second parent if it exists
2681 (use a delta from the first parent otherwise)
2713 (use a delta from the first parent otherwise)
2682 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2714 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2683 * `storage`: add from the existing precomputed deltas
2715 * `storage`: add from the existing precomputed deltas
2684
2716
2685 Note: This performance command measures performance in a custom way. As a
2717 Note: This performance command measures performance in a custom way. As a
2686 result some of the global configuration of the 'perf' command does not
2718 result some of the global configuration of the 'perf' command does not
2687 apply to it:
2719 apply to it:
2688
2720
2689 * ``pre-run``: disabled
2721 * ``pre-run``: disabled
2690
2722
2691 * ``profile-benchmark``: disabled
2723 * ``profile-benchmark``: disabled
2692
2724
2693 * ``run-limits``: disabled use --count instead
2725 * ``run-limits``: disabled use --count instead
2694 """
2726 """
2695 opts = _byteskwargs(opts)
2727 opts = _byteskwargs(opts)
2696
2728
2697 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2729 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2698 rllen = getlen(ui)(rl)
2730 rllen = getlen(ui)(rl)
2699 if startrev < 0:
2731 if startrev < 0:
2700 startrev = rllen + startrev
2732 startrev = rllen + startrev
2701 if stoprev < 0:
2733 if stoprev < 0:
2702 stoprev = rllen + stoprev
2734 stoprev = rllen + stoprev
2703
2735
2704 lazydeltabase = opts['lazydeltabase']
2736 lazydeltabase = opts['lazydeltabase']
2705 source = opts['source']
2737 source = opts['source']
2706 clearcaches = opts['clear_caches']
2738 clearcaches = opts['clear_caches']
2707 validsource = (
2739 validsource = (
2708 b'full',
2740 b'full',
2709 b'parent-1',
2741 b'parent-1',
2710 b'parent-2',
2742 b'parent-2',
2711 b'parent-smallest',
2743 b'parent-smallest',
2712 b'storage',
2744 b'storage',
2713 )
2745 )
2714 if source not in validsource:
2746 if source not in validsource:
2715 raise error.Abort('invalid source type: %s' % source)
2747 raise error.Abort('invalid source type: %s' % source)
2716
2748
2717 ### actually gather results
2749 ### actually gather results
2718 count = opts['count']
2750 count = opts['count']
2719 if count <= 0:
2751 if count <= 0:
2720 raise error.Abort('invalide run count: %d' % count)
2752 raise error.Abort('invalide run count: %d' % count)
2721 allresults = []
2753 allresults = []
2722 for c in range(count):
2754 for c in range(count):
2723 timing = _timeonewrite(
2755 timing = _timeonewrite(
2724 ui,
2756 ui,
2725 rl,
2757 rl,
2726 source,
2758 source,
2727 startrev,
2759 startrev,
2728 stoprev,
2760 stoprev,
2729 c + 1,
2761 c + 1,
2730 lazydeltabase=lazydeltabase,
2762 lazydeltabase=lazydeltabase,
2731 clearcaches=clearcaches,
2763 clearcaches=clearcaches,
2732 )
2764 )
2733 allresults.append(timing)
2765 allresults.append(timing)
2734
2766
2735 ### consolidate the results in a single list
2767 ### consolidate the results in a single list
2736 results = []
2768 results = []
2737 for idx, (rev, t) in enumerate(allresults[0]):
2769 for idx, (rev, t) in enumerate(allresults[0]):
2738 ts = [t]
2770 ts = [t]
2739 for other in allresults[1:]:
2771 for other in allresults[1:]:
2740 orev, ot = other[idx]
2772 orev, ot = other[idx]
2741 assert orev == rev
2773 assert orev == rev
2742 ts.append(ot)
2774 ts.append(ot)
2743 results.append((rev, ts))
2775 results.append((rev, ts))
2744 resultcount = len(results)
2776 resultcount = len(results)
2745
2777
2746 ### Compute and display relevant statistics
2778 ### Compute and display relevant statistics
2747
2779
2748 # get a formatter
2780 # get a formatter
2749 fm = ui.formatter(b'perf', opts)
2781 fm = ui.formatter(b'perf', opts)
2750 displayall = ui.configbool(b"perf", b"all-timing", False)
2782 displayall = ui.configbool(b"perf", b"all-timing", False)
2751
2783
2752 # print individual details if requested
2784 # print individual details if requested
2753 if opts['details']:
2785 if opts['details']:
2754 for idx, item in enumerate(results, 1):
2786 for idx, item in enumerate(results, 1):
2755 rev, data = item
2787 rev, data = item
2756 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2788 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2757 formatone(fm, data, title=title, displayall=displayall)
2789 formatone(fm, data, title=title, displayall=displayall)
2758
2790
2759 # sorts results by median time
2791 # sorts results by median time
2760 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2792 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2761 # list of (name, index) to display)
2793 # list of (name, index) to display)
2762 relevants = [
2794 relevants = [
2763 ("min", 0),
2795 ("min", 0),
2764 ("10%", resultcount * 10 // 100),
2796 ("10%", resultcount * 10 // 100),
2765 ("25%", resultcount * 25 // 100),
2797 ("25%", resultcount * 25 // 100),
2766 ("50%", resultcount * 70 // 100),
2798 ("50%", resultcount * 70 // 100),
2767 ("75%", resultcount * 75 // 100),
2799 ("75%", resultcount * 75 // 100),
2768 ("90%", resultcount * 90 // 100),
2800 ("90%", resultcount * 90 // 100),
2769 ("95%", resultcount * 95 // 100),
2801 ("95%", resultcount * 95 // 100),
2770 ("99%", resultcount * 99 // 100),
2802 ("99%", resultcount * 99 // 100),
2771 ("99.9%", resultcount * 999 // 1000),
2803 ("99.9%", resultcount * 999 // 1000),
2772 ("99.99%", resultcount * 9999 // 10000),
2804 ("99.99%", resultcount * 9999 // 10000),
2773 ("99.999%", resultcount * 99999 // 100000),
2805 ("99.999%", resultcount * 99999 // 100000),
2774 ("max", -1),
2806 ("max", -1),
2775 ]
2807 ]
2776 if not ui.quiet:
2808 if not ui.quiet:
2777 for name, idx in relevants:
2809 for name, idx in relevants:
2778 data = results[idx]
2810 data = results[idx]
2779 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2811 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2780 formatone(fm, data[1], title=title, displayall=displayall)
2812 formatone(fm, data[1], title=title, displayall=displayall)
2781
2813
2782 # XXX summing that many float will not be very precise, we ignore this fact
2814 # XXX summing that many float will not be very precise, we ignore this fact
2783 # for now
2815 # for now
2784 totaltime = []
2816 totaltime = []
2785 for item in allresults:
2817 for item in allresults:
2786 totaltime.append(
2818 totaltime.append(
2787 (
2819 (
2788 sum(x[1][0] for x in item),
2820 sum(x[1][0] for x in item),
2789 sum(x[1][1] for x in item),
2821 sum(x[1][1] for x in item),
2790 sum(x[1][2] for x in item),
2822 sum(x[1][2] for x in item),
2791 )
2823 )
2792 )
2824 )
2793 formatone(
2825 formatone(
2794 fm,
2826 fm,
2795 totaltime,
2827 totaltime,
2796 title="total time (%d revs)" % resultcount,
2828 title="total time (%d revs)" % resultcount,
2797 displayall=displayall,
2829 displayall=displayall,
2798 )
2830 )
2799 fm.end()
2831 fm.end()
2800
2832
2801
2833
2802 class _faketr(object):
2834 class _faketr(object):
2803 def add(s, x, y, z=None):
2835 def add(s, x, y, z=None):
2804 return None
2836 return None
2805
2837
2806
2838
2807 def _timeonewrite(
2839 def _timeonewrite(
2808 ui,
2840 ui,
2809 orig,
2841 orig,
2810 source,
2842 source,
2811 startrev,
2843 startrev,
2812 stoprev,
2844 stoprev,
2813 runidx=None,
2845 runidx=None,
2814 lazydeltabase=True,
2846 lazydeltabase=True,
2815 clearcaches=True,
2847 clearcaches=True,
2816 ):
2848 ):
2817 timings = []
2849 timings = []
2818 tr = _faketr()
2850 tr = _faketr()
2819 with _temprevlog(ui, orig, startrev) as dest:
2851 with _temprevlog(ui, orig, startrev) as dest:
2820 dest._lazydeltabase = lazydeltabase
2852 dest._lazydeltabase = lazydeltabase
2821 revs = list(orig.revs(startrev, stoprev))
2853 revs = list(orig.revs(startrev, stoprev))
2822 total = len(revs)
2854 total = len(revs)
2823 topic = 'adding'
2855 topic = 'adding'
2824 if runidx is not None:
2856 if runidx is not None:
2825 topic += ' (run #%d)' % runidx
2857 topic += ' (run #%d)' % runidx
2826 # Support both old and new progress API
2858 # Support both old and new progress API
2827 if util.safehasattr(ui, 'makeprogress'):
2859 if util.safehasattr(ui, 'makeprogress'):
2828 progress = ui.makeprogress(topic, unit='revs', total=total)
2860 progress = ui.makeprogress(topic, unit='revs', total=total)
2829
2861
2830 def updateprogress(pos):
2862 def updateprogress(pos):
2831 progress.update(pos)
2863 progress.update(pos)
2832
2864
2833 def completeprogress():
2865 def completeprogress():
2834 progress.complete()
2866 progress.complete()
2835
2867
2836 else:
2868 else:
2837
2869
2838 def updateprogress(pos):
2870 def updateprogress(pos):
2839 ui.progress(topic, pos, unit='revs', total=total)
2871 ui.progress(topic, pos, unit='revs', total=total)
2840
2872
2841 def completeprogress():
2873 def completeprogress():
2842 ui.progress(topic, None, unit='revs', total=total)
2874 ui.progress(topic, None, unit='revs', total=total)
2843
2875
2844 for idx, rev in enumerate(revs):
2876 for idx, rev in enumerate(revs):
2845 updateprogress(idx)
2877 updateprogress(idx)
2846 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2878 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2847 if clearcaches:
2879 if clearcaches:
2848 dest.index.clearcaches()
2880 dest.index.clearcaches()
2849 dest.clearcaches()
2881 dest.clearcaches()
2850 with timeone() as r:
2882 with timeone() as r:
2851 dest.addrawrevision(*addargs, **addkwargs)
2883 dest.addrawrevision(*addargs, **addkwargs)
2852 timings.append((rev, r[0]))
2884 timings.append((rev, r[0]))
2853 updateprogress(total)
2885 updateprogress(total)
2854 completeprogress()
2886 completeprogress()
2855 return timings
2887 return timings
2856
2888
2857
2889
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair to feed ``addrawrevision`` for ``rev``.

    ``source`` selects where the revision payload comes from: a full text
    (``full``), a delta against one of the parents (``parent-1``,
    ``parent-2``, ``parent-smallest``) or the delta actually used by the
    storage (``storage``).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        # re-add the revision as a full snapshot
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the smaller delta (ties keep p1)
        base, delta = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdelta = orig.revdiff(p2, rev)
            if len(otherdelta) < len(delta):
                base, delta = p2, otherdelta
        cachedelta = (orig.rev(base), delta)
    elif source == b'storage':
        # reuse the delta base recorded in the original storage
        base = orig.deltaparent(rev)
        cachedelta = (base, orig.revdiff(orig.node(base), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2898
2930
2899
2931
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a scratch copy of revlog ``orig`` truncated to ``truncaterev``.

    The copy lives in a temporary directory and is suitable for re-adding
    the truncated revisions during a benchmark; the directory is removed
    when the context exits.  Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward upperboundcomp when the installed revlog version supports it
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    srcindex = orig.opener.join(orig.indexfile)
    srcdata = orig.opener.join(orig.datafile)

    scratchdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % scratchdir)
        dstindex = os.path.join(scratchdir, 'revlog.i')
        dstdata = os.path.join(scratchdir, 'revlog.d')
        shutil.copyfile(srcindex, dstindex)
        shutil.copyfile(srcdata, dstdata)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(dstindex, 'ab') as fp:
            fp.seek(0)
            fp.truncate(truncaterev * orig._io.size)
        with open(dstdata, 'ab') as fp:
            fp.seek(0)
            fp.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(scratchdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile='revlog.i', datafile='revlog.d', **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # best-effort cleanup (ignore_errors=True)
        shutil.rmtree(scratchdir, True)
2950
2982
2951
2983
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit selection: probe every available engine that can
        # actually compress for revlogs
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the revlog's backing file
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, cold caches, fresh fd each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread() but reusing a single file descriptor
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read covering the whole revision span
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress one chunk per revision
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect (reused below by docompress).
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks captured by dochunkbatch() with `compressor`
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # one compression benchmark per selected engine
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3079
3111
3080
3112
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m, the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice the raw segments (`data`) back into per-revision chunks;
        # hoist attribute lookups out of the loop for speed
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        # step 1: delta chain computation
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # step 3 (I/O only): read the raw segments for each slice
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # step 2: slicing the delta chain (sparse-read only)
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        # step 4: decompression of each raw chunk
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        # step 5: apply the binary deltas onto the base text
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        # step 6: fulltext hash verification
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        # all steps combined: full revision resolution
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older Mercurial versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase benchmark needs
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3222
3254
3223
3255
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold filtering and obsolescence related data."""
    # NOTE: the docstring previously advertised a nonexistent `--clean`
    # option; the registered flag is `-C/--clear` (see the @command table).
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # force the volatile sets to be recomputed on each run
            repo.invalidatevolatilesets()
        if contexts:
            # also pay the cost of changectx creation per revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3255
3287
3256
3288
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()
    dropobsstore = opts[b'clear_obsstore']

    def selected(candidates):
        # stable order, restricted to the requested names (if any)
        picked = sorted(candidates)
        if names:
            picked = [n for n in picked if n in names]
        return picked

    def obsbench(setname):
        # build a benchmark closure for one obsolescence-related set
        def run():
            repo.invalidatevolatilesets()
            if dropobsstore:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, setname)

        return run

    for setname in selected(obsolete.cachefuncs):
        timer(obsbench(setname), title=setname)

    def filterbench(filtername):
        # build a benchmark closure for one repoview filter
        def run():
            repo.invalidatevolatilesets()
            if dropobsstore:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, filtername)

        return run

    for filtername in selected(repoview.filtertable):
        timer(filterbench(filtername), title=filtername)
    fm.end()
3302
3334
3303
3335
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every branchcache so the whole chain is rebuilt
                view._branchcaches.clear()
            else:
                # only drop this filter's cache; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset has already been ordered (or has none)
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchcache reads/writes so only the in-memory
    # update cost is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # always restore the patched cache I/O entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3393
3425
3394
3426
3395 @command(
3427 @command(
3396 b'perfbranchmapupdate',
3428 b'perfbranchmapupdate',
3397 [
3429 [
3398 (b'', b'base', [], b'subset of revision to start from'),
3430 (b'', b'base', [], b'subset of revision to start from'),
3399 (b'', b'target', [], b'subset of revision to end with'),
3431 (b'', b'target', [], b'subset of revision to end with'),
3400 (b'', b'clear-caches', False, b'clear cache between each runs'),
3432 (b'', b'clear-caches', False, b'clear cache between each runs'),
3401 ]
3433 ]
3402 + formatteropts,
3434 + formatteropts,
3403 )
3435 )
3404 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3436 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3405 """benchmark branchmap update from for <base> revs to <target> revs
3437 """benchmark branchmap update from for <base> revs to <target> revs
3406
3438
3407 If `--clear-caches` is passed, the following items will be reset before
3439 If `--clear-caches` is passed, the following items will be reset before
3408 each update:
3440 each update:
3409 * the changelog instance and associated indexes
3441 * the changelog instance and associated indexes
3410 * the rev-branch-cache instance
3442 * the rev-branch-cache instance
3411
3443
3412 Examples:
3444 Examples:
3413
3445
3414 # update for the one last revision
3446 # update for the one last revision
3415 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3447 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3416
3448
3417 $ update for change coming with a new branch
3449 $ update for change coming with a new branch
3418 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3450 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3419 """
3451 """
3420 from mercurial import branchmap
3452 from mercurial import branchmap
3421 from mercurial import repoview
3453 from mercurial import repoview
3422
3454
3423 opts = _byteskwargs(opts)
3455 opts = _byteskwargs(opts)
3424 timer, fm = gettimer(ui, opts)
3456 timer, fm = gettimer(ui, opts)
3425 clearcaches = opts[b'clear_caches']
3457 clearcaches = opts[b'clear_caches']
3426 unfi = repo.unfiltered()
3458 unfi = repo.unfiltered()
3427 x = [None] # used to pass data between closure
3459 x = [None] # used to pass data between closure
3428
3460
3429 # we use a `list` here to avoid possible side effect from smartset
3461 # we use a `list` here to avoid possible side effect from smartset
3430 baserevs = list(scmutil.revrange(repo, base))
3462 baserevs = list(scmutil.revrange(repo, base))
3431 targetrevs = list(scmutil.revrange(repo, target))
3463 targetrevs = list(scmutil.revrange(repo, target))
3432 if not baserevs:
3464 if not baserevs:
3433 raise error.Abort(b'no revisions selected for --base')
3465 raise error.Abort(b'no revisions selected for --base')
3434 if not targetrevs:
3466 if not targetrevs:
3435 raise error.Abort(b'no revisions selected for --target')
3467 raise error.Abort(b'no revisions selected for --target')
3436
3468
3437 # make sure the target branchmap also contains the one in the base
3469 # make sure the target branchmap also contains the one in the base
3438 targetrevs = list(set(baserevs) | set(targetrevs))
3470 targetrevs = list(set(baserevs) | set(targetrevs))
3439 targetrevs.sort()
3471 targetrevs.sort()
3440
3472
3441 cl = repo.changelog
3473 cl = repo.changelog
3442 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3474 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3443 allbaserevs.sort()
3475 allbaserevs.sort()
3444 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3476 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3445
3477
3446 newrevs = list(alltargetrevs.difference(allbaserevs))
3478 newrevs = list(alltargetrevs.difference(allbaserevs))
3447 newrevs.sort()
3479 newrevs.sort()
3448
3480
3449 allrevs = frozenset(unfi.changelog.revs())
3481 allrevs = frozenset(unfi.changelog.revs())
3450 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3482 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3451 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3483 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3452
3484
3453 def basefilter(repo, visibilityexceptions=None):
3485 def basefilter(repo, visibilityexceptions=None):
3454 return basefilterrevs
3486 return basefilterrevs
3455
3487
3456 def targetfilter(repo, visibilityexceptions=None):
3488 def targetfilter(repo, visibilityexceptions=None):
3457 return targetfilterrevs
3489 return targetfilterrevs
3458
3490
3459 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3491 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3460 ui.status(msg % (len(allbaserevs), len(newrevs)))
3492 ui.status(msg % (len(allbaserevs), len(newrevs)))
3461 if targetfilterrevs:
3493 if targetfilterrevs:
3462 msg = b'(%d revisions still filtered)\n'
3494 msg = b'(%d revisions still filtered)\n'
3463 ui.status(msg % len(targetfilterrevs))
3495 ui.status(msg % len(targetfilterrevs))
3464
3496
3465 try:
3497 try:
3466 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3498 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3467 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3499 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3468
3500
3469 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3501 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3470 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3502 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3471
3503
3472 # try to find an existing branchmap to reuse
3504 # try to find an existing branchmap to reuse
3473 subsettable = getbranchmapsubsettable()
3505 subsettable = getbranchmapsubsettable()
3474 candidatefilter = subsettable.get(None)
3506 candidatefilter = subsettable.get(None)
3475 while candidatefilter is not None:
3507 while candidatefilter is not None:
3476 candidatebm = repo.filtered(candidatefilter).branchmap()
3508 candidatebm = repo.filtered(candidatefilter).branchmap()
3477 if candidatebm.validfor(baserepo):
3509 if candidatebm.validfor(baserepo):
3478 filtered = repoview.filterrevs(repo, candidatefilter)
3510 filtered = repoview.filterrevs(repo, candidatefilter)
3479 missing = [r for r in allbaserevs if r in filtered]
3511 missing = [r for r in allbaserevs if r in filtered]
3480 base = candidatebm.copy()
3512 base = candidatebm.copy()
3481 base.update(baserepo, missing)
3513 base.update(baserepo, missing)
3482 break
3514 break
3483 candidatefilter = subsettable.get(candidatefilter)
3515 candidatefilter = subsettable.get(candidatefilter)
3484 else:
3516 else:
3485 # no suitable subset where found
3517 # no suitable subset where found
3486 base = branchmap.branchcache()
3518 base = branchmap.branchcache()
3487 base.update(baserepo, allbaserevs)
3519 base.update(baserepo, allbaserevs)
3488
3520
3489 def setup():
3521 def setup():
3490 x[0] = base.copy()
3522 x[0] = base.copy()
3491 if clearcaches:
3523 if clearcaches:
3492 unfi._revbranchcache = None
3524 unfi._revbranchcache = None
3493 clearchangelog(repo)
3525 clearchangelog(repo)
3494
3526
3495 def bench():
3527 def bench():
3496 x[0].update(targetrepo, newrevs)
3528 x[0].update(targetrepo, newrevs)
3497
3529
3498 timer(bench, setup=setup)
3530 timer(bench, setup=setup)
3499 fm.end()
3531 fm.end()
3500 finally:
3532 finally:
3501 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3533 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3502 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3534 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3503
3535
3504
3536
3505 @command(
3537 @command(
3506 b'perfbranchmapload',
3538 b'perfbranchmapload',
3507 [
3539 [
3508 (b'f', b'filter', b'', b'Specify repoview filter'),
3540 (b'f', b'filter', b'', b'Specify repoview filter'),
3509 (b'', b'list', False, b'List brachmap filter caches'),
3541 (b'', b'list', False, b'List brachmap filter caches'),
3510 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3542 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3511 ]
3543 ]
3512 + formatteropts,
3544 + formatteropts,
3513 )
3545 )
3514 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3546 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3515 """benchmark reading the branchmap"""
3547 """benchmark reading the branchmap"""
3516 opts = _byteskwargs(opts)
3548 opts = _byteskwargs(opts)
3517 clearrevlogs = opts[b'clear_revlogs']
3549 clearrevlogs = opts[b'clear_revlogs']
3518
3550
3519 if list:
3551 if list:
3520 for name, kind, st in repo.cachevfs.readdir(stat=True):
3552 for name, kind, st in repo.cachevfs.readdir(stat=True):
3521 if name.startswith(b'branch2'):
3553 if name.startswith(b'branch2'):
3522 filtername = name.partition(b'-')[2] or b'unfiltered'
3554 filtername = name.partition(b'-')[2] or b'unfiltered'
3523 ui.status(
3555 ui.status(
3524 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3556 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3525 )
3557 )
3526 return
3558 return
3527 if not filter:
3559 if not filter:
3528 filter = None
3560 filter = None
3529 subsettable = getbranchmapsubsettable()
3561 subsettable = getbranchmapsubsettable()
3530 if filter is None:
3562 if filter is None:
3531 repo = repo.unfiltered()
3563 repo = repo.unfiltered()
3532 else:
3564 else:
3533 repo = repoview.repoview(repo, filter)
3565 repo = repoview.repoview(repo, filter)
3534
3566
3535 repo.branchmap() # make sure we have a relevant, up to date branchmap
3567 repo.branchmap() # make sure we have a relevant, up to date branchmap
3536
3568
3537 try:
3569 try:
3538 fromfile = branchmap.branchcache.fromfile
3570 fromfile = branchmap.branchcache.fromfile
3539 except AttributeError:
3571 except AttributeError:
3540 # older versions
3572 # older versions
3541 fromfile = branchmap.read
3573 fromfile = branchmap.read
3542
3574
3543 currentfilter = filter
3575 currentfilter = filter
3544 # try once without timer, the filter may not be cached
3576 # try once without timer, the filter may not be cached
3545 while fromfile(repo) is None:
3577 while fromfile(repo) is None:
3546 currentfilter = subsettable.get(currentfilter)
3578 currentfilter = subsettable.get(currentfilter)
3547 if currentfilter is None:
3579 if currentfilter is None:
3548 raise error.Abort(
3580 raise error.Abort(
3549 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3581 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3550 )
3582 )
3551 repo = repo.filtered(currentfilter)
3583 repo = repo.filtered(currentfilter)
3552 timer, fm = gettimer(ui, opts)
3584 timer, fm = gettimer(ui, opts)
3553
3585
3554 def setup():
3586 def setup():
3555 if clearrevlogs:
3587 if clearrevlogs:
3556 clearchangelog(repo)
3588 clearchangelog(repo)
3557
3589
3558 def bench():
3590 def bench():
3559 fromfile(repo)
3591 fromfile(repo)
3560
3592
3561 timer(bench, setup=setup)
3593 timer(bench, setup=setup)
3562 fm.end()
3594 fm.end()
3563
3595
3564
3596
3565 @command(b'perfloadmarkers')
3597 @command(b'perfloadmarkers')
3566 def perfloadmarkers(ui, repo):
3598 def perfloadmarkers(ui, repo):
3567 """benchmark the time to parse the on-disk markers for a repo
3599 """benchmark the time to parse the on-disk markers for a repo
3568
3600
3569 Result is the number of markers in the repo."""
3601 Result is the number of markers in the repo."""
3570 timer, fm = gettimer(ui)
3602 timer, fm = gettimer(ui)
3571 svfs = getsvfs(repo)
3603 svfs = getsvfs(repo)
3572 timer(lambda: len(obsolete.obsstore(svfs)))
3604 timer(lambda: len(obsolete.obsstore(svfs)))
3573 fm.end()
3605 fm.end()
3574
3606
3575
3607
3576 @command(
3608 @command(
3577 b'perflrucachedict',
3609 b'perflrucachedict',
3578 formatteropts
3610 formatteropts
3579 + [
3611 + [
3580 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3612 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3581 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3613 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3582 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3614 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3583 (b'', b'size', 4, b'size of cache'),
3615 (b'', b'size', 4, b'size of cache'),
3584 (b'', b'gets', 10000, b'number of key lookups'),
3616 (b'', b'gets', 10000, b'number of key lookups'),
3585 (b'', b'sets', 10000, b'number of key sets'),
3617 (b'', b'sets', 10000, b'number of key sets'),
3586 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3618 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3587 (
3619 (
3588 b'',
3620 b'',
3589 b'mixedgetfreq',
3621 b'mixedgetfreq',
3590 50,
3622 50,
3591 b'frequency of get vs set ops in mixed mode',
3623 b'frequency of get vs set ops in mixed mode',
3592 ),
3624 ),
3593 ],
3625 ],
3594 norepo=True,
3626 norepo=True,
3595 )
3627 )
3596 def perflrucache(
3628 def perflrucache(
3597 ui,
3629 ui,
3598 mincost=0,
3630 mincost=0,
3599 maxcost=100,
3631 maxcost=100,
3600 costlimit=0,
3632 costlimit=0,
3601 size=4,
3633 size=4,
3602 gets=10000,
3634 gets=10000,
3603 sets=10000,
3635 sets=10000,
3604 mixed=10000,
3636 mixed=10000,
3605 mixedgetfreq=50,
3637 mixedgetfreq=50,
3606 **opts
3638 **opts
3607 ):
3639 ):
3608 opts = _byteskwargs(opts)
3640 opts = _byteskwargs(opts)
3609
3641
3610 def doinit():
3642 def doinit():
3611 for i in _xrange(10000):
3643 for i in _xrange(10000):
3612 util.lrucachedict(size)
3644 util.lrucachedict(size)
3613
3645
3614 costrange = list(range(mincost, maxcost + 1))
3646 costrange = list(range(mincost, maxcost + 1))
3615
3647
3616 values = []
3648 values = []
3617 for i in _xrange(size):
3649 for i in _xrange(size):
3618 values.append(random.randint(0, _maxint))
3650 values.append(random.randint(0, _maxint))
3619
3651
3620 # Get mode fills the cache and tests raw lookup performance with no
3652 # Get mode fills the cache and tests raw lookup performance with no
3621 # eviction.
3653 # eviction.
3622 getseq = []
3654 getseq = []
3623 for i in _xrange(gets):
3655 for i in _xrange(gets):
3624 getseq.append(random.choice(values))
3656 getseq.append(random.choice(values))
3625
3657
3626 def dogets():
3658 def dogets():
3627 d = util.lrucachedict(size)
3659 d = util.lrucachedict(size)
3628 for v in values:
3660 for v in values:
3629 d[v] = v
3661 d[v] = v
3630 for key in getseq:
3662 for key in getseq:
3631 value = d[key]
3663 value = d[key]
3632 value # silence pyflakes warning
3664 value # silence pyflakes warning
3633
3665
3634 def dogetscost():
3666 def dogetscost():
3635 d = util.lrucachedict(size, maxcost=costlimit)
3667 d = util.lrucachedict(size, maxcost=costlimit)
3636 for i, v in enumerate(values):
3668 for i, v in enumerate(values):
3637 d.insert(v, v, cost=costs[i])
3669 d.insert(v, v, cost=costs[i])
3638 for key in getseq:
3670 for key in getseq:
3639 try:
3671 try:
3640 value = d[key]
3672 value = d[key]
3641 value # silence pyflakes warning
3673 value # silence pyflakes warning
3642 except KeyError:
3674 except KeyError:
3643 pass
3675 pass
3644
3676
3645 # Set mode tests insertion speed with cache eviction.
3677 # Set mode tests insertion speed with cache eviction.
3646 setseq = []
3678 setseq = []
3647 costs = []
3679 costs = []
3648 for i in _xrange(sets):
3680 for i in _xrange(sets):
3649 setseq.append(random.randint(0, _maxint))
3681 setseq.append(random.randint(0, _maxint))
3650 costs.append(random.choice(costrange))
3682 costs.append(random.choice(costrange))
3651
3683
3652 def doinserts():
3684 def doinserts():
3653 d = util.lrucachedict(size)
3685 d = util.lrucachedict(size)
3654 for v in setseq:
3686 for v in setseq:
3655 d.insert(v, v)
3687 d.insert(v, v)
3656
3688
3657 def doinsertscost():
3689 def doinsertscost():
3658 d = util.lrucachedict(size, maxcost=costlimit)
3690 d = util.lrucachedict(size, maxcost=costlimit)
3659 for i, v in enumerate(setseq):
3691 for i, v in enumerate(setseq):
3660 d.insert(v, v, cost=costs[i])
3692 d.insert(v, v, cost=costs[i])
3661
3693
3662 def dosets():
3694 def dosets():
3663 d = util.lrucachedict(size)
3695 d = util.lrucachedict(size)
3664 for v in setseq:
3696 for v in setseq:
3665 d[v] = v
3697 d[v] = v
3666
3698
3667 # Mixed mode randomly performs gets and sets with eviction.
3699 # Mixed mode randomly performs gets and sets with eviction.
3668 mixedops = []
3700 mixedops = []
3669 for i in _xrange(mixed):
3701 for i in _xrange(mixed):
3670 r = random.randint(0, 100)
3702 r = random.randint(0, 100)
3671 if r < mixedgetfreq:
3703 if r < mixedgetfreq:
3672 op = 0
3704 op = 0
3673 else:
3705 else:
3674 op = 1
3706 op = 1
3675
3707
3676 mixedops.append(
3708 mixedops.append(
3677 (op, random.randint(0, size * 2), random.choice(costrange))
3709 (op, random.randint(0, size * 2), random.choice(costrange))
3678 )
3710 )
3679
3711
3680 def domixed():
3712 def domixed():
3681 d = util.lrucachedict(size)
3713 d = util.lrucachedict(size)
3682
3714
3683 for op, v, cost in mixedops:
3715 for op, v, cost in mixedops:
3684 if op == 0:
3716 if op == 0:
3685 try:
3717 try:
3686 d[v]
3718 d[v]
3687 except KeyError:
3719 except KeyError:
3688 pass
3720 pass
3689 else:
3721 else:
3690 d[v] = v
3722 d[v] = v
3691
3723
3692 def domixedcost():
3724 def domixedcost():
3693 d = util.lrucachedict(size, maxcost=costlimit)
3725 d = util.lrucachedict(size, maxcost=costlimit)
3694
3726
3695 for op, v, cost in mixedops:
3727 for op, v, cost in mixedops:
3696 if op == 0:
3728 if op == 0:
3697 try:
3729 try:
3698 d[v]
3730 d[v]
3699 except KeyError:
3731 except KeyError:
3700 pass
3732 pass
3701 else:
3733 else:
3702 d.insert(v, v, cost=cost)
3734 d.insert(v, v, cost=cost)
3703
3735
3704 benches = [
3736 benches = [
3705 (doinit, b'init'),
3737 (doinit, b'init'),
3706 ]
3738 ]
3707
3739
3708 if costlimit:
3740 if costlimit:
3709 benches.extend(
3741 benches.extend(
3710 [
3742 [
3711 (dogetscost, b'gets w/ cost limit'),
3743 (dogetscost, b'gets w/ cost limit'),
3712 (doinsertscost, b'inserts w/ cost limit'),
3744 (doinsertscost, b'inserts w/ cost limit'),
3713 (domixedcost, b'mixed w/ cost limit'),
3745 (domixedcost, b'mixed w/ cost limit'),
3714 ]
3746 ]
3715 )
3747 )
3716 else:
3748 else:
3717 benches.extend(
3749 benches.extend(
3718 [
3750 [
3719 (dogets, b'gets'),
3751 (dogets, b'gets'),
3720 (doinserts, b'inserts'),
3752 (doinserts, b'inserts'),
3721 (dosets, b'sets'),
3753 (dosets, b'sets'),
3722 (domixed, b'mixed'),
3754 (domixed, b'mixed'),
3723 ]
3755 ]
3724 )
3756 )
3725
3757
3726 for fn, title in benches:
3758 for fn, title in benches:
3727 timer, fm = gettimer(ui, opts)
3759 timer, fm = gettimer(ui, opts)
3728 timer(fn, title=title)
3760 timer(fn, title=title)
3729 fm.end()
3761 fm.end()
3730
3762
3731
3763
3732 @command(b'perfwrite', formatteropts)
3764 @command(b'perfwrite', formatteropts)
3733 def perfwrite(ui, repo, **opts):
3765 def perfwrite(ui, repo, **opts):
3734 """microbenchmark ui.write
3766 """microbenchmark ui.write
3735 """
3767 """
3736 opts = _byteskwargs(opts)
3768 opts = _byteskwargs(opts)
3737
3769
3738 timer, fm = gettimer(ui, opts)
3770 timer, fm = gettimer(ui, opts)
3739
3771
3740 def write():
3772 def write():
3741 for i in range(100000):
3773 for i in range(100000):
3742 ui.writenoi18n(b'Testing write performance\n')
3774 ui.writenoi18n(b'Testing write performance\n')
3743
3775
3744 timer(write)
3776 timer(write)
3745 fm.end()
3777 fm.end()
3746
3778
3747
3779
3748 def uisetup(ui):
3780 def uisetup(ui):
3749 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3781 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3750 commands, b'debugrevlogopts'
3782 commands, b'debugrevlogopts'
3751 ):
3783 ):
3752 # for "historical portability":
3784 # for "historical portability":
3753 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3785 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3754 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3786 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3755 # openrevlog() should cause failure, because it has been
3787 # openrevlog() should cause failure, because it has been
3756 # available since 3.5 (or 49c583ca48c4).
3788 # available since 3.5 (or 49c583ca48c4).
3757 def openrevlog(orig, repo, cmd, file_, opts):
3789 def openrevlog(orig, repo, cmd, file_, opts):
3758 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3790 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3759 raise error.Abort(
3791 raise error.Abort(
3760 b"This version doesn't support --dir option",
3792 b"This version doesn't support --dir option",
3761 hint=b"use 3.5 or later",
3793 hint=b"use 3.5 or later",
3762 )
3794 )
3763 return orig(repo, cmd, file_, opts)
3795 return orig(repo, cmd, file_, opts)
3764
3796
3765 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3797 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3766
3798
3767
3799
3768 @command(
3800 @command(
3769 b'perfprogress',
3801 b'perfprogress',
3770 formatteropts
3802 formatteropts
3771 + [
3803 + [
3772 (b'', b'topic', b'topic', b'topic for progress messages'),
3804 (b'', b'topic', b'topic', b'topic for progress messages'),
3773 (b'c', b'total', 1000000, b'total value we are progressing to'),
3805 (b'c', b'total', 1000000, b'total value we are progressing to'),
3774 ],
3806 ],
3775 norepo=True,
3807 norepo=True,
3776 )
3808 )
3777 def perfprogress(ui, topic=None, total=None, **opts):
3809 def perfprogress(ui, topic=None, total=None, **opts):
3778 """printing of progress bars"""
3810 """printing of progress bars"""
3779 opts = _byteskwargs(opts)
3811 opts = _byteskwargs(opts)
3780
3812
3781 timer, fm = gettimer(ui, opts)
3813 timer, fm = gettimer(ui, opts)
3782
3814
3783 def doprogress():
3815 def doprogress():
3784 with ui.makeprogress(topic, total=total) as progress:
3816 with ui.makeprogress(topic, total=total) as progress:
3785 for i in _xrange(total):
3817 for i in _xrange(total):
3786 progress.increment()
3818 progress.increment()
3787
3819
3788 timer(doprogress)
3820 timer(doprogress)
3789 fm.end()
3821 fm.end()
@@ -1,397 +1,398 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perfaddremove
81 perfaddremove
82 (no help text available)
82 (no help text available)
83 perfancestors
83 perfancestors
84 (no help text available)
84 (no help text available)
85 perfancestorset
85 perfancestorset
86 (no help text available)
86 (no help text available)
87 perfannotate (no help text available)
87 perfannotate (no help text available)
88 perfbdiff benchmark a bdiff between revisions
88 perfbdiff benchmark a bdiff between revisions
89 perfbookmarks
89 perfbookmarks
90 benchmark parsing bookmarks from disk to memory
90 benchmark parsing bookmarks from disk to memory
91 perfbranchmap
91 perfbranchmap
92 benchmark the update of a branchmap
92 benchmark the update of a branchmap
93 perfbranchmapload
93 perfbranchmapload
94 benchmark reading the branchmap
94 benchmark reading the branchmap
95 perfbranchmapupdate
95 perfbranchmapupdate
96 benchmark branchmap update from for <base> revs to <target>
96 benchmark branchmap update from for <base> revs to <target>
97 revs
97 revs
98 perfbundleread
98 perfbundleread
99 Benchmark reading of bundle files.
99 Benchmark reading of bundle files.
100 perfcca (no help text available)
100 perfcca (no help text available)
101 perfchangegroupchangelog
101 perfchangegroupchangelog
102 Benchmark producing a changelog group for a changegroup.
102 Benchmark producing a changelog group for a changegroup.
103 perfchangeset
103 perfchangeset
104 (no help text available)
104 (no help text available)
105 perfctxfiles (no help text available)
105 perfctxfiles (no help text available)
106 perfdiffwd Profile diff of working directory changes
106 perfdiffwd Profile diff of working directory changes
107 perfdirfoldmap
107 perfdirfoldmap
108 benchmap a 'dirstate._map.dirfoldmap.get()' request
108 benchmap a 'dirstate._map.dirfoldmap.get()' request
109 perfdirs (no help text available)
109 perfdirs (no help text available)
110 perfdirstate benchmap the time of various distate operations
110 perfdirstate benchmap the time of various distate operations
111 perfdirstatedirs
111 perfdirstatedirs
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
113 perfdirstatefoldmap
113 perfdirstatefoldmap
114 benchmap a 'dirstate._map.filefoldmap.get()' request
114 benchmap a 'dirstate._map.filefoldmap.get()' request
115 perfdirstatewrite
115 perfdirstatewrite
116 benchmap the time it take to write a dirstate on disk
116 benchmap the time it take to write a dirstate on disk
117 perfdiscovery
117 perfdiscovery
118 benchmark discovery between local repo and the peer at given
118 benchmark discovery between local repo and the peer at given
119 path
119 path
120 perffncacheencode
120 perffncacheencode
121 (no help text available)
121 (no help text available)
122 perffncacheload
122 perffncacheload
123 (no help text available)
123 (no help text available)
124 perffncachewrite
124 perffncachewrite
125 (no help text available)
125 (no help text available)
126 perfheads benchmark the computation of a changelog heads
126 perfheads benchmark the computation of a changelog heads
127 perfhelper-mergecopies
127 perfhelper-mergecopies
128 find statistics about potential parameters for
128 find statistics about potential parameters for
129 'perfmergecopies'
129 'perfmergecopies'
130 perfhelper-pathcopies
130 perfhelper-pathcopies
131 find statistic about potential parameters for the
131 find statistic about potential parameters for the
132 'perftracecopies'
132 'perftracecopies'
133 perfignore benchmark operation related to computing ignore
133 perfignore benchmark operation related to computing ignore
134 perfindex benchmark index creation time followed by a lookup
134 perfindex benchmark index creation time followed by a lookup
135 perflinelogedits
135 perflinelogedits
136 (no help text available)
136 (no help text available)
137 perfloadmarkers
137 perfloadmarkers
138 benchmark the time to parse the on-disk markers for a repo
138 benchmark the time to parse the on-disk markers for a repo
139 perflog (no help text available)
139 perflog (no help text available)
140 perflookup (no help text available)
140 perflookup (no help text available)
141 perflrucachedict
141 perflrucachedict
142 (no help text available)
142 (no help text available)
143 perfmanifest benchmark the time to read a manifest from disk and return a
143 perfmanifest benchmark the time to read a manifest from disk and return a
144 usable
144 usable
145 perfmergecalculate
145 perfmergecalculate
146 (no help text available)
146 (no help text available)
147 perfmergecopies
147 perfmergecopies
148 measure runtime of 'copies.mergecopies'
148 measure runtime of 'copies.mergecopies'
149 perfmoonwalk benchmark walking the changelog backwards
149 perfmoonwalk benchmark walking the changelog backwards
150 perfnodelookup
150 perfnodelookup
151 (no help text available)
151 (no help text available)
152 perfnodemap benchmark the time necessary to look up revision from a cold
152 perfnodemap benchmark the time necessary to look up revision from a cold
153 nodemap
153 nodemap
154 perfparents benchmark the time necessary to fetch one changeset's parents.
154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 perfpathcopies
155 perfpathcopies
156 benchmark the copy tracing logic
156 benchmark the copy tracing logic
157 perfphases benchmark phasesets computation
157 perfphases benchmark phasesets computation
158 perfphasesremote
158 perfphasesremote
159 benchmark time needed to analyse phases of the remote server
159 benchmark time needed to analyse phases of the remote server
160 perfprogress printing of progress bars
160 perfprogress printing of progress bars
161 perfrawfiles (no help text available)
161 perfrawfiles (no help text available)
162 perfrevlogchunks
162 perfrevlogchunks
163 Benchmark operations on revlog chunks.
163 Benchmark operations on revlog chunks.
164 perfrevlogindex
164 perfrevlogindex
165 Benchmark operations against a revlog index.
165 Benchmark operations against a revlog index.
166 perfrevlogrevision
166 perfrevlogrevision
167 Benchmark obtaining a revlog revision.
167 Benchmark obtaining a revlog revision.
168 perfrevlogrevisions
168 perfrevlogrevisions
169 Benchmark reading a series of revisions from a revlog.
169 Benchmark reading a series of revisions from a revlog.
170 perfrevlogwrite
170 perfrevlogwrite
171 Benchmark writing a series of revisions to a revlog.
171 Benchmark writing a series of revisions to a revlog.
172 perfrevrange (no help text available)
172 perfrevrange (no help text available)
173 perfrevset benchmark the execution time of a revset
173 perfrevset benchmark the execution time of a revset
174 perfstartup (no help text available)
174 perfstartup (no help text available)
175 perfstatus benchmark the performance of a single status call
175 perfstatus benchmark the performance of a single status call
176 perftags (no help text available)
176 perftags (no help text available)
177 perftemplating
177 perftemplating
178 test the rendering time of a given template
178 test the rendering time of a given template
179 perfunidiff benchmark a unified diff between revisions
179 perfunidiff benchmark a unified diff between revisions
180 perfvolatilesets
180 perfvolatilesets
181 benchmark the computation of various volatile set
181 benchmark the computation of various volatile set
182 perfwalk (no help text available)
182 perfwalk (no help text available)
183 perfwrite microbenchmark ui.write
183 perfwrite microbenchmark ui.write
184
184
185 (use 'hg help -v perf' to show built-in aliases and global options)
185 (use 'hg help -v perf' to show built-in aliases and global options)
186 $ hg perfaddremove
186 $ hg perfaddremove
187 $ hg perfancestors
187 $ hg perfancestors
188 $ hg perfancestorset 2
188 $ hg perfancestorset 2
189 $ hg perfannotate a
189 $ hg perfannotate a
190 $ hg perfbdiff -c 1
190 $ hg perfbdiff -c 1
191 $ hg perfbdiff --alldata 1
191 $ hg perfbdiff --alldata 1
192 $ hg perfunidiff -c 1
192 $ hg perfunidiff -c 1
193 $ hg perfunidiff --alldata 1
193 $ hg perfunidiff --alldata 1
194 $ hg perfbookmarks
194 $ hg perfbookmarks
195 $ hg perfbranchmap
195 $ hg perfbranchmap
196 $ hg perfbranchmapload
196 $ hg perfbranchmapload
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 benchmark of branchmap with 3 revisions with 1 new ones
198 benchmark of branchmap with 3 revisions with 1 new ones
199 $ hg perfcca
199 $ hg perfcca
200 $ hg perfchangegroupchangelog
200 $ hg perfchangegroupchangelog
201 $ hg perfchangegroupchangelog --cgversion 01
201 $ hg perfchangegroupchangelog --cgversion 01
202 $ hg perfchangeset 2
202 $ hg perfchangeset 2
203 $ hg perfctxfiles 2
203 $ hg perfctxfiles 2
204 $ hg perfdiffwd
204 $ hg perfdiffwd
205 $ hg perfdirfoldmap
205 $ hg perfdirfoldmap
206 $ hg perfdirs
206 $ hg perfdirs
207 $ hg perfdirstate
207 $ hg perfdirstate
208 $ hg perfdirstate --contains
208 $ hg perfdirstate --iteration
209 $ hg perfdirstate --iteration
209 $ hg perfdirstatedirs
210 $ hg perfdirstatedirs
210 $ hg perfdirstatefoldmap
211 $ hg perfdirstatefoldmap
211 $ hg perfdirstatewrite
212 $ hg perfdirstatewrite
212 #if repofncache
213 #if repofncache
213 $ hg perffncacheencode
214 $ hg perffncacheencode
214 $ hg perffncacheload
215 $ hg perffncacheload
215 $ hg debugrebuildfncache
216 $ hg debugrebuildfncache
216 fncache already up to date
217 fncache already up to date
217 $ hg perffncachewrite
218 $ hg perffncachewrite
218 $ hg debugrebuildfncache
219 $ hg debugrebuildfncache
219 fncache already up to date
220 fncache already up to date
220 #endif
221 #endif
221 $ hg perfheads
222 $ hg perfheads
222 $ hg perfignore
223 $ hg perfignore
223 $ hg perfindex
224 $ hg perfindex
224 $ hg perflinelogedits -n 1
225 $ hg perflinelogedits -n 1
225 $ hg perfloadmarkers
226 $ hg perfloadmarkers
226 $ hg perflog
227 $ hg perflog
227 $ hg perflookup 2
228 $ hg perflookup 2
228 $ hg perflrucache
229 $ hg perflrucache
229 $ hg perfmanifest 2
230 $ hg perfmanifest 2
230 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
231 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
231 $ hg perfmanifest -m 44fe2c8352bb
232 $ hg perfmanifest -m 44fe2c8352bb
232 abort: manifest revision must be integer or full node
233 abort: manifest revision must be integer or full node
233 [255]
234 [255]
234 $ hg perfmergecalculate -r 3
235 $ hg perfmergecalculate -r 3
235 $ hg perfmoonwalk
236 $ hg perfmoonwalk
236 $ hg perfnodelookup 2
237 $ hg perfnodelookup 2
237 $ hg perfpathcopies 1 2
238 $ hg perfpathcopies 1 2
238 $ hg perfprogress --total 1000
239 $ hg perfprogress --total 1000
239 $ hg perfrawfiles 2
240 $ hg perfrawfiles 2
240 $ hg perfrevlogindex -c
241 $ hg perfrevlogindex -c
241 #if reporevlogstore
242 #if reporevlogstore
242 $ hg perfrevlogrevisions .hg/store/data/a.i
243 $ hg perfrevlogrevisions .hg/store/data/a.i
243 #endif
244 #endif
244 $ hg perfrevlogrevision -m 0
245 $ hg perfrevlogrevision -m 0
245 $ hg perfrevlogchunks -c
246 $ hg perfrevlogchunks -c
246 $ hg perfrevrange
247 $ hg perfrevrange
247 $ hg perfrevset 'all()'
248 $ hg perfrevset 'all()'
248 $ hg perfstartup
249 $ hg perfstartup
249 $ hg perfstatus
250 $ hg perfstatus
250 $ hg perftags
251 $ hg perftags
251 $ hg perftemplating
252 $ hg perftemplating
252 $ hg perfvolatilesets
253 $ hg perfvolatilesets
253 $ hg perfwalk
254 $ hg perfwalk
254 $ hg perfparents
255 $ hg perfparents
255 $ hg perfdiscovery -q .
256 $ hg perfdiscovery -q .
256
257
257 Test run control
258 Test run control
258 ----------------
259 ----------------
259
260
260 Simple single entry
261 Simple single entry
261
262
262 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
263 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
263 ! wall * comb * user * sys * (best of 15) (glob)
264 ! wall * comb * user * sys * (best of 15) (glob)
264
265
265 Multiple entries
266 Multiple entries
266
267
267 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
268 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
268 ! wall * comb * user * sys * (best of 5) (glob)
269 ! wall * comb * user * sys * (best of 5) (glob)
269
270
270 error cases are ignored
271 error cases are ignored
271
272
272 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
273 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
273 malformatted run limit entry, missing "-": 500
274 malformatted run limit entry, missing "-": 500
274 ! wall * comb * user * sys * (best of 5) (glob)
275 ! wall * comb * user * sys * (best of 5) (glob)
275 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
276 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
276 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
277 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
277 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
278 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
278 ! wall * comb * user * sys * (best of 5) (glob)
279 ! wall * comb * user * sys * (best of 5) (glob)
279 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
280 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
280 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
281 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
281 ! wall * comb * user * sys * (best of 5) (glob)
282 ! wall * comb * user * sys * (best of 5) (glob)
282
283
283 test actual output
284 test actual output
284 ------------------
285 ------------------
285
286
286 normal output:
287 normal output:
287
288
288 $ hg perfheads --config perf.stub=no
289 $ hg perfheads --config perf.stub=no
289 ! wall * comb * user * sys * (best of *) (glob)
290 ! wall * comb * user * sys * (best of *) (glob)
290
291
291 detailed output:
292 detailed output:
292
293
293 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
294 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
294 ! wall * comb * user * sys * (best of *) (glob)
295 ! wall * comb * user * sys * (best of *) (glob)
295 ! wall * comb * user * sys * (max of *) (glob)
296 ! wall * comb * user * sys * (max of *) (glob)
296 ! wall * comb * user * sys * (avg of *) (glob)
297 ! wall * comb * user * sys * (avg of *) (glob)
297 ! wall * comb * user * sys * (median of *) (glob)
298 ! wall * comb * user * sys * (median of *) (glob)
298
299
299 test json output
300 test json output
300 ----------------
301 ----------------
301
302
302 normal output:
303 normal output:
303
304
304 $ hg perfheads --template json --config perf.stub=no
305 $ hg perfheads --template json --config perf.stub=no
305 [
306 [
306 {
307 {
307 "comb": *, (glob)
308 "comb": *, (glob)
308 "count": *, (glob)
309 "count": *, (glob)
309 "sys": *, (glob)
310 "sys": *, (glob)
310 "user": *, (glob)
311 "user": *, (glob)
311 "wall": * (glob)
312 "wall": * (glob)
312 }
313 }
313 ]
314 ]
314
315
315 detailed output:
316 detailed output:
316
317
317 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
318 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
318 [
319 [
319 {
320 {
320 "avg.comb": *, (glob)
321 "avg.comb": *, (glob)
321 "avg.count": *, (glob)
322 "avg.count": *, (glob)
322 "avg.sys": *, (glob)
323 "avg.sys": *, (glob)
323 "avg.user": *, (glob)
324 "avg.user": *, (glob)
324 "avg.wall": *, (glob)
325 "avg.wall": *, (glob)
325 "comb": *, (glob)
326 "comb": *, (glob)
326 "count": *, (glob)
327 "count": *, (glob)
327 "max.comb": *, (glob)
328 "max.comb": *, (glob)
328 "max.count": *, (glob)
329 "max.count": *, (glob)
329 "max.sys": *, (glob)
330 "max.sys": *, (glob)
330 "max.user": *, (glob)
331 "max.user": *, (glob)
331 "max.wall": *, (glob)
332 "max.wall": *, (glob)
332 "median.comb": *, (glob)
333 "median.comb": *, (glob)
333 "median.count": *, (glob)
334 "median.count": *, (glob)
334 "median.sys": *, (glob)
335 "median.sys": *, (glob)
335 "median.user": *, (glob)
336 "median.user": *, (glob)
336 "median.wall": *, (glob)
337 "median.wall": *, (glob)
337 "sys": *, (glob)
338 "sys": *, (glob)
338 "user": *, (glob)
339 "user": *, (glob)
339 "wall": * (glob)
340 "wall": * (glob)
340 }
341 }
341 ]
342 ]
342
343
343 Test pre-run feature
344 Test pre-run feature
344 --------------------
345 --------------------
345
346
346 (perf discovery has some spurious output)
347 (perf discovery has some spurious output)
347
348
348 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
349 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
349 ! wall * comb * user * sys * (best of 1) (glob)
350 ! wall * comb * user * sys * (best of 1) (glob)
350 searching for changes
351 searching for changes
351 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
352 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
352 ! wall * comb * user * sys * (best of 1) (glob)
353 ! wall * comb * user * sys * (best of 1) (glob)
353 searching for changes
354 searching for changes
354 searching for changes
355 searching for changes
355 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
356 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
356 ! wall * comb * user * sys * (best of 1) (glob)
357 ! wall * comb * user * sys * (best of 1) (glob)
357 searching for changes
358 searching for changes
358 searching for changes
359 searching for changes
359 searching for changes
360 searching for changes
360 searching for changes
361 searching for changes
361
362
362 test profile-benchmark option
363 test profile-benchmark option
363 ------------------------------
364 ------------------------------
364
365
365 Function to check that statprof ran
366 Function to check that statprof ran
366 $ statprofran () {
367 $ statprofran () {
367 > egrep 'Sample count:|No samples recorded' > /dev/null
368 > egrep 'Sample count:|No samples recorded' > /dev/null
368 > }
369 > }
369 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
370 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
370
371
371 Check perf.py for historical portability
372 Check perf.py for historical portability
372 ----------------------------------------
373 ----------------------------------------
373
374
374 $ cd "$TESTDIR/.."
375 $ cd "$TESTDIR/.."
375
376
376 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
377 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
377 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
378 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
378 > "$TESTDIR"/check-perf-code.py contrib/perf.py
379 > "$TESTDIR"/check-perf-code.py contrib/perf.py
379 contrib/perf.py:\d+: (re)
380 contrib/perf.py:\d+: (re)
380 > from mercurial import (
381 > from mercurial import (
381 import newer module separately in try clause for early Mercurial
382 import newer module separately in try clause for early Mercurial
382 contrib/perf.py:\d+: (re)
383 contrib/perf.py:\d+: (re)
383 > from mercurial import (
384 > from mercurial import (
384 import newer module separately in try clause for early Mercurial
385 import newer module separately in try clause for early Mercurial
385 contrib/perf.py:\d+: (re)
386 contrib/perf.py:\d+: (re)
386 > origindexpath = orig.opener.join(orig.indexfile)
387 > origindexpath = orig.opener.join(orig.indexfile)
387 use getvfs()/getsvfs() for early Mercurial
388 use getvfs()/getsvfs() for early Mercurial
388 contrib/perf.py:\d+: (re)
389 contrib/perf.py:\d+: (re)
389 > origdatapath = orig.opener.join(orig.datafile)
390 > origdatapath = orig.opener.join(orig.datafile)
390 use getvfs()/getsvfs() for early Mercurial
391 use getvfs()/getsvfs() for early Mercurial
391 contrib/perf.py:\d+: (re)
392 contrib/perf.py:\d+: (re)
392 > vfs = vfsmod.vfs(tmpdir)
393 > vfs = vfsmod.vfs(tmpdir)
393 use getvfs()/getsvfs() for early Mercurial
394 use getvfs()/getsvfs() for early Mercurial
394 contrib/perf.py:\d+: (re)
395 contrib/perf.py:\d+: (re)
395 > vfs.options = getattr(orig.opener, 'options', None)
396 > vfs.options = getattr(orig.opener, 'options', None)
396 use getvfs()/getsvfs() for early Mercurial
397 use getvfs()/getsvfs() for early Mercurial
397 [1]
398 [1]
General Comments 0
You need to be logged in to leave comments. Login now