command-namespace: use `::` as the command separator...
marmoute - r47117:d8ad391e default
@@ -1,3915 +1,3919 @@
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
11 worst, median, average. If not set, only the best timing is reported
11 worst, median, average. If not set, only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If the benchmark has been running for <time> seconds, and we have performed
30 If the benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark.
31 <numberofrun> iterations, stop the benchmark.
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
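# For illustration only: the options documented above are read from the
# `[perf]` section of an hgrc file. A minimal example configuration (the
# values below are arbitrary, not recommendations) could look like:
#
#   [perf]
#   all-timing = yes
#   presleep = 0
#   pre-run = 2
#   run-limits = 5.0-50, 20.0-5
#   stub = no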
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide a range of Mercurial versions as possible
43 # - make perf.py "loadable" with as wide a range of Mercurial versions as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf commands work correctly with as wide a range of
46 # - make historical perf commands work correctly with as wide a range of
47 # Mercurial versions as possible
47 # Mercurial versions as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf commands for historical features work correctly
50 # - make recent perf commands for historical features work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf commands for recent features work correctly with early
54 # - make perf commands for recent features work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
123 def identity(a):
123 def identity(a):
124 return a
124 return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
177 def safehasattr(thing, attr):
177 def safehasattr(thing, attr):
178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
179
179
180
180
181 setattr(util, 'safehasattr', safehasattr)
181 setattr(util, 'safehasattr', safehasattr)
182
182
183 # for "historical portability":
183 # for "historical portability":
184 # define util.timer forcibly, because util.timer has been available
184 # define util.timer forcibly, because util.timer has been available
185 # since ae5d60bb70c9
185 # since ae5d60bb70c9
186 if safehasattr(time, 'perf_counter'):
186 if safehasattr(time, 'perf_counter'):
187 util.timer = time.perf_counter
187 util.timer = time.perf_counter
188 elif os.name == b'nt':
188 elif os.name == b'nt':
189 util.timer = time.clock
189 util.timer = time.clock
190 else:
190 else:
191 util.timer = time.time
191 util.timer = time.time
192
192
193 # for "historical portability":
193 # for "historical portability":
194 # use locally defined empty option list, if formatteropts isn't
194 # use locally defined empty option list, if formatteropts isn't
195 # available, because commands.formatteropts has been available since
195 # available, because commands.formatteropts has been available since
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 # available since 2.2 (or ae5f92e154d3)
197 # available since 2.2 (or ae5f92e154d3)
198 formatteropts = getattr(
198 formatteropts = getattr(
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 )
200 )
201
201
202 # for "historical portability":
202 # for "historical portability":
203 # use locally defined option list, if debugrevlogopts isn't available,
203 # use locally defined option list, if debugrevlogopts isn't available,
204 # because commands.debugrevlogopts has been available since 3.7 (or
204 # because commands.debugrevlogopts has been available since 3.7 (or
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 # since 1.9 (or a79fea6b3e77).
206 # since 1.9 (or a79fea6b3e77).
207 revlogopts = getattr(
207 revlogopts = getattr(
208 cmdutil,
208 cmdutil,
209 "debugrevlogopts",
209 "debugrevlogopts",
210 getattr(
210 getattr(
211 commands,
211 commands,
212 "debugrevlogopts",
212 "debugrevlogopts",
213 [
213 [
214 (b'c', b'changelog', False, b'open changelog'),
214 (b'c', b'changelog', False, b'open changelog'),
215 (b'm', b'manifest', False, b'open manifest'),
215 (b'm', b'manifest', False, b'open manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
217 ],
217 ],
218 ),
218 ),
219 )
219 )
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
226 def parsealiases(cmd):
226 def parsealiases(cmd):
227 return cmd.split(b"|")
227 return cmd.split(b"|")
228
228
229
229
230 if safehasattr(registrar, 'command'):
230 if safehasattr(registrar, 'command'):
231 command = registrar.command(cmdtable)
231 command = registrar.command(cmdtable)
232 elif safehasattr(cmdutil, 'command'):
232 elif safehasattr(cmdutil, 'command'):
233 command = cmdutil.command(cmdtable)
233 command = cmdutil.command(cmdtable)
234 if 'norepo' not in getargspec(command).args:
234 if 'norepo' not in getargspec(command).args:
235 # for "historical portability":
235 # for "historical portability":
236 # wrap original cmdutil.command, because "norepo" option has
236 # wrap original cmdutil.command, because "norepo" option has
237 # been available since 3.1 (or 75a96326cecb)
237 # been available since 3.1 (or 75a96326cecb)
238 _command = command
238 _command = command
239
239
240 def command(name, options=(), synopsis=None, norepo=False):
240 def command(name, options=(), synopsis=None, norepo=False):
241 if norepo:
241 if norepo:
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
243 return _command(name, list(options), synopsis)
243 return _command(name, list(options), synopsis)
244
244
245
245
246 else:
246 else:
247 # for "historical portability":
247 # for "historical portability":
248 # define "@command" annotation locally, because cmdutil.command
248 # define "@command" annotation locally, because cmdutil.command
249 # has been available since 1.9 (or 2daa5179e73f)
249 # has been available since 1.9 (or 2daa5179e73f)
250 def command(name, options=(), synopsis=None, norepo=False):
250 def command(name, options=(), synopsis=None, norepo=False):
251 def decorator(func):
251 def decorator(func):
252 if synopsis:
252 if synopsis:
253 cmdtable[name] = func, list(options), synopsis
253 cmdtable[name] = func, list(options), synopsis
254 else:
254 else:
255 cmdtable[name] = func, list(options)
255 cmdtable[name] = func, list(options)
256 if norepo:
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return func
258 return func
259
259
260 return decorator
260 return decorator
261
261
262
262
263 try:
263 try:
264 import mercurial.registrar
264 import mercurial.registrar
265 import mercurial.configitems
265 import mercurial.configitems
266
266
267 configtable = {}
267 configtable = {}
268 configitem = mercurial.registrar.configitem(configtable)
268 configitem = mercurial.registrar.configitem(configtable)
269 configitem(
269 configitem(
270 b'perf',
270 b'perf',
271 b'presleep',
271 b'presleep',
272 default=mercurial.configitems.dynamicdefault,
272 default=mercurial.configitems.dynamicdefault,
273 experimental=True,
273 experimental=True,
274 )
274 )
275 configitem(
275 configitem(
276 b'perf',
276 b'perf',
277 b'stub',
277 b'stub',
278 default=mercurial.configitems.dynamicdefault,
278 default=mercurial.configitems.dynamicdefault,
279 experimental=True,
279 experimental=True,
280 )
280 )
281 configitem(
281 configitem(
282 b'perf',
282 b'perf',
283 b'parentscount',
283 b'parentscount',
284 default=mercurial.configitems.dynamicdefault,
284 default=mercurial.configitems.dynamicdefault,
285 experimental=True,
285 experimental=True,
286 )
286 )
287 configitem(
287 configitem(
288 b'perf',
288 b'perf',
289 b'all-timing',
289 b'all-timing',
290 default=mercurial.configitems.dynamicdefault,
290 default=mercurial.configitems.dynamicdefault,
291 experimental=True,
291 experimental=True,
292 )
292 )
293 configitem(
293 configitem(
294 b'perf',
294 b'perf',
295 b'pre-run',
295 b'pre-run',
296 default=mercurial.configitems.dynamicdefault,
296 default=mercurial.configitems.dynamicdefault,
297 )
297 )
298 configitem(
298 configitem(
299 b'perf',
299 b'perf',
300 b'profile-benchmark',
300 b'profile-benchmark',
301 default=mercurial.configitems.dynamicdefault,
301 default=mercurial.configitems.dynamicdefault,
302 )
302 )
303 configitem(
303 configitem(
304 b'perf',
304 b'perf',
305 b'run-limits',
305 b'run-limits',
306 default=mercurial.configitems.dynamicdefault,
306 default=mercurial.configitems.dynamicdefault,
307 experimental=True,
307 experimental=True,
308 )
308 )
309 except (ImportError, AttributeError):
309 except (ImportError, AttributeError):
310 pass
310 pass
311 except TypeError:
311 except TypeError:
312 # compatibility fix for a11fd395e83f
312 # compatibility fix for a11fd395e83f
313 # hg version: 5.2
313 # hg version: 5.2
314 configitem(
314 configitem(
315 b'perf',
315 b'perf',
316 b'presleep',
316 b'presleep',
317 default=mercurial.configitems.dynamicdefault,
317 default=mercurial.configitems.dynamicdefault,
318 )
318 )
319 configitem(
319 configitem(
320 b'perf',
320 b'perf',
321 b'stub',
321 b'stub',
322 default=mercurial.configitems.dynamicdefault,
322 default=mercurial.configitems.dynamicdefault,
323 )
323 )
324 configitem(
324 configitem(
325 b'perf',
325 b'perf',
326 b'parentscount',
326 b'parentscount',
327 default=mercurial.configitems.dynamicdefault,
327 default=mercurial.configitems.dynamicdefault,
328 )
328 )
329 configitem(
329 configitem(
330 b'perf',
330 b'perf',
331 b'all-timing',
331 b'all-timing',
332 default=mercurial.configitems.dynamicdefault,
332 default=mercurial.configitems.dynamicdefault,
333 )
333 )
334 configitem(
334 configitem(
335 b'perf',
335 b'perf',
336 b'pre-run',
336 b'pre-run',
337 default=mercurial.configitems.dynamicdefault,
337 default=mercurial.configitems.dynamicdefault,
338 )
338 )
339 configitem(
339 configitem(
340 b'perf',
340 b'perf',
341 b'profile-benchmark',
341 b'profile-benchmark',
342 default=mercurial.configitems.dynamicdefault,
342 default=mercurial.configitems.dynamicdefault,
343 )
343 )
344 configitem(
344 configitem(
345 b'perf',
345 b'perf',
346 b'run-limits',
346 b'run-limits',
347 default=mercurial.configitems.dynamicdefault,
347 default=mercurial.configitems.dynamicdefault,
348 )
348 )
349
349
350
350
351 def getlen(ui):
351 def getlen(ui):
352 if ui.configbool(b"perf", b"stub", False):
352 if ui.configbool(b"perf", b"stub", False):
353 return lambda x: 1
353 return lambda x: 1
354 return len
354 return len
355
355
356
356
357 class noop(object):
357 class noop(object):
358 """dummy context manager"""
358 """dummy context manager"""
359
359
360 def __enter__(self):
360 def __enter__(self):
361 pass
361 pass
362
362
363 def __exit__(self, *args):
363 def __exit__(self, *args):
364 pass
364 pass
365
365
366
366
367 NOOPCTX = noop()
367 NOOPCTX = noop()
368
368
369
369
370 def gettimer(ui, opts=None):
370 def gettimer(ui, opts=None):
371 """return a timer function and formatter: (timer, formatter)
371 """return a timer function and formatter: (timer, formatter)
372
372
373 This function exists to gather the creation of the formatter in a single
373 This function exists to gather the creation of the formatter in a single
374 place instead of duplicating it in all performance commands."""
374 place instead of duplicating it in all performance commands."""
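# Usage sketch (the pattern followed by the perf commands below; `d` and `s`
# are placeholder callables for the benchmarked body and its optional setup):
#
#   timer, fm = gettimer(ui, opts)
#   timer(d, setup=s)
#   fm.end()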
375
375
376 # enforce an idle period before execution to counteract power management
376 # enforce an idle period before execution to counteract power management
377 # experimental config: perf.presleep
377 # experimental config: perf.presleep
378 time.sleep(getint(ui, b"perf", b"presleep", 1))
378 time.sleep(getint(ui, b"perf", b"presleep", 1))
379
379
380 if opts is None:
380 if opts is None:
381 opts = {}
381 opts = {}
382 # redirect all to stderr unless buffer api is in use
382 # redirect all to stderr unless buffer api is in use
383 if not ui._buffers:
383 if not ui._buffers:
384 ui = ui.copy()
384 ui = ui.copy()
385 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
385 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
386 if uifout:
386 if uifout:
387 # for "historical portability":
387 # for "historical portability":
388 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
388 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
389 uifout.set(ui.ferr)
389 uifout.set(ui.ferr)
390
390
391 # get a formatter
391 # get a formatter
392 uiformatter = getattr(ui, 'formatter', None)
392 uiformatter = getattr(ui, 'formatter', None)
393 if uiformatter:
393 if uiformatter:
394 fm = uiformatter(b'perf', opts)
394 fm = uiformatter(b'perf', opts)
395 else:
395 else:
396 # for "historical portability":
396 # for "historical portability":
397 # define formatter locally, because ui.formatter has been
397 # define formatter locally, because ui.formatter has been
398 # available since 2.2 (or ae5f92e154d3)
398 # available since 2.2 (or ae5f92e154d3)
399 from mercurial import node
399 from mercurial import node
400
400
401 class defaultformatter(object):
401 class defaultformatter(object):
402 """Minimized composition of baseformatter and plainformatter"""
402 """Minimized composition of baseformatter and plainformatter"""
403
403
404 def __init__(self, ui, topic, opts):
404 def __init__(self, ui, topic, opts):
405 self._ui = ui
405 self._ui = ui
406 if ui.debugflag:
406 if ui.debugflag:
407 self.hexfunc = node.hex
407 self.hexfunc = node.hex
408 else:
408 else:
409 self.hexfunc = node.short
409 self.hexfunc = node.short
410
410
411 def __nonzero__(self):
411 def __nonzero__(self):
412 return False
412 return False
413
413
414 __bool__ = __nonzero__
414 __bool__ = __nonzero__
415
415
416 def startitem(self):
416 def startitem(self):
417 pass
417 pass
418
418
419 def data(self, **data):
419 def data(self, **data):
420 pass
420 pass
421
421
422 def write(self, fields, deftext, *fielddata, **opts):
422 def write(self, fields, deftext, *fielddata, **opts):
423 self._ui.write(deftext % fielddata, **opts)
423 self._ui.write(deftext % fielddata, **opts)
424
424
425 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
425 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
426 if cond:
426 if cond:
427 self._ui.write(deftext % fielddata, **opts)
427 self._ui.write(deftext % fielddata, **opts)
428
428
429 def plain(self, text, **opts):
429 def plain(self, text, **opts):
430 self._ui.write(text, **opts)
430 self._ui.write(text, **opts)
431
431
432 def end(self):
432 def end(self):
433 pass
433 pass
434
434
435 fm = defaultformatter(ui, b'perf', opts)
435 fm = defaultformatter(ui, b'perf', opts)
436
436
437 # stub function, runs code only once instead of in a loop
437 # stub function, runs code only once instead of in a loop
438 # experimental config: perf.stub
438 # experimental config: perf.stub
439 if ui.configbool(b"perf", b"stub", False):
439 if ui.configbool(b"perf", b"stub", False):
440 return functools.partial(stub_timer, fm), fm
440 return functools.partial(stub_timer, fm), fm
441
441
442 # experimental config: perf.all-timing
442 # experimental config: perf.all-timing
443 displayall = ui.configbool(b"perf", b"all-timing", False)
443 displayall = ui.configbool(b"perf", b"all-timing", False)
444
444
445 # experimental config: perf.run-limits
445 # experimental config: perf.run-limits
446 limitspec = ui.configlist(b"perf", b"run-limits", [])
446 limitspec = ui.configlist(b"perf", b"run-limits", [])
447 limits = []
447 limits = []
448 for item in limitspec:
448 for item in limitspec:
449 parts = item.split(b'-', 1)
449 parts = item.split(b'-', 1)
450 if len(parts) < 2:
450 if len(parts) < 2:
451 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
451 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
452 continue
452 continue
453 try:
453 try:
454 time_limit = float(_sysstr(parts[0]))
454 time_limit = float(_sysstr(parts[0]))
455 except ValueError as e:
455 except ValueError as e:
456 ui.warn(
456 ui.warn(
457 (
457 (
458 b'malformatted run limit entry, %s: %s\n'
458 b'malformatted run limit entry, %s: %s\n'
459 % (_bytestr(e), item)
459 % (_bytestr(e), item)
460 )
460 )
461 )
461 )
462 continue
462 continue
463 try:
463 try:
464 run_limit = int(_sysstr(parts[1]))
464 run_limit = int(_sysstr(parts[1]))
465 except ValueError as e:
465 except ValueError as e:
466 ui.warn(
466 ui.warn(
467 (
467 (
468 b'malformatted run limit entry, %s: %s\n'
468 b'malformatted run limit entry, %s: %s\n'
469 % (_bytestr(e), item)
469 % (_bytestr(e), item)
470 )
470 )
471 )
471 )
472 continue
472 continue
473 limits.append((time_limit, run_limit))
473 limits.append((time_limit, run_limit))
474 if not limits:
474 if not limits:
475 limits = DEFAULTLIMITS
475 limits = DEFAULTLIMITS
476
476
477 profiler = None
477 profiler = None
478 if profiling is not None:
478 if profiling is not None:
479 if ui.configbool(b"perf", b"profile-benchmark", False):
479 if ui.configbool(b"perf", b"profile-benchmark", False):
480 profiler = profiling.profile(ui)
480 profiler = profiling.profile(ui)
481
481
482 prerun = getint(ui, b"perf", b"pre-run", 0)
482 prerun = getint(ui, b"perf", b"pre-run", 0)
483 t = functools.partial(
483 t = functools.partial(
484 _timer,
484 _timer,
485 fm,
485 fm,
486 displayall=displayall,
486 displayall=displayall,
487 limits=limits,
487 limits=limits,
488 prerun=prerun,
488 prerun=prerun,
489 profiler=profiler,
489 profiler=profiler,
490 )
490 )
491 return t, fm
491 return t, fm
492
492
493
493
494 def stub_timer(fm, func, setup=None, title=None):
494 def stub_timer(fm, func, setup=None, title=None):
495 if setup is not None:
495 if setup is not None:
496 setup()
496 setup()
497 func()
497 func()
498
498
499
499
500 @contextlib.contextmanager
500 @contextlib.contextmanager
501 def timeone():
501 def timeone():
502 r = []
502 r = []
503 ostart = os.times()
503 ostart = os.times()
504 cstart = util.timer()
504 cstart = util.timer()
505 yield r
505 yield r
506 cstop = util.timer()
506 cstop = util.timer()
507 ostop = os.times()
507 ostop = os.times()
508 a, b = ostart, ostop
508 a, b = ostart, ostop
509 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
509 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
510
510
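# Usage sketch for timeone() (this is the pattern _timer() below relies on;
# `func` is a placeholder callable):
#
#   with timeone() as item:
#       func()
#   wall, user, system = item[0]  # wall-clock, user CPU and system CPU seconds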
511
511
512 # list of stop conditions (elapsed time, minimal run count)
512 # list of stop conditions (elapsed time, minimal run count)
513 DEFAULTLIMITS = (
513 DEFAULTLIMITS = (
514 (3.0, 100),
514 (3.0, 100),
515 (10.0, 3),
515 (10.0, 3),
516 )
516 )
517
517
518
518
519 def _timer(
519 def _timer(
520 fm,
520 fm,
521 func,
521 func,
522 setup=None,
522 setup=None,
523 title=None,
523 title=None,
524 displayall=False,
524 displayall=False,
525 limits=DEFAULTLIMITS,
525 limits=DEFAULTLIMITS,
526 prerun=0,
526 prerun=0,
527 profiler=None,
527 profiler=None,
528 ):
528 ):
529 gc.collect()
529 gc.collect()
530 results = []
530 results = []
531 begin = util.timer()
531 begin = util.timer()
532 count = 0
532 count = 0
533 if profiler is None:
533 if profiler is None:
534 profiler = NOOPCTX
534 profiler = NOOPCTX
535 for i in range(prerun):
535 for i in range(prerun):
536 if setup is not None:
536 if setup is not None:
537 setup()
537 setup()
538 func()
538 func()
539 keepgoing = True
539 keepgoing = True
540 while keepgoing:
540 while keepgoing:
541 if setup is not None:
541 if setup is not None:
542 setup()
542 setup()
543 with profiler:
543 with profiler:
544 with timeone() as item:
544 with timeone() as item:
545 r = func()
545 r = func()
546 profiler = NOOPCTX
546 profiler = NOOPCTX
547 count += 1
547 count += 1
548 results.append(item[0])
548 results.append(item[0])
549 cstop = util.timer()
549 cstop = util.timer()
550 # Look for a stop condition.
550 # Look for a stop condition.
551 elapsed = cstop - begin
551 elapsed = cstop - begin
552 for t, mincount in limits:
552 for t, mincount in limits:
553 if elapsed >= t and count >= mincount:
553 if elapsed >= t and count >= mincount:
554 keepgoing = False
554 keepgoing = False
555 break
555 break
556
556
557 formatone(fm, results, title=title, result=r, displayall=displayall)
557 formatone(fm, results, title=title, result=r, displayall=displayall)
558
558
559
559
560 def formatone(fm, timings, title=None, result=None, displayall=False):
560 def formatone(fm, timings, title=None, result=None, displayall=False):
561
561
562 count = len(timings)
562 count = len(timings)
563
563
564 fm.startitem()
564 fm.startitem()
565
565
566 if title:
566 if title:
567 fm.write(b'title', b'! %s\n', title)
567 fm.write(b'title', b'! %s\n', title)
568 if result:
568 if result:
569 fm.write(b'result', b'! result: %s\n', result)
569 fm.write(b'result', b'! result: %s\n', result)
570
570
571 def display(role, entry):
571 def display(role, entry):
572 prefix = b''
572 prefix = b''
573 if role != b'best':
573 if role != b'best':
574 prefix = b'%s.' % role
574 prefix = b'%s.' % role
575 fm.plain(b'!')
575 fm.plain(b'!')
576 fm.write(prefix + b'wall', b' wall %f', entry[0])
576 fm.write(prefix + b'wall', b' wall %f', entry[0])
577 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
577 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
578 fm.write(prefix + b'user', b' user %f', entry[1])
578 fm.write(prefix + b'user', b' user %f', entry[1])
579 fm.write(prefix + b'sys', b' sys %f', entry[2])
579 fm.write(prefix + b'sys', b' sys %f', entry[2])
580 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
580 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
581 fm.plain(b'\n')
581 fm.plain(b'\n')
582
582
583 timings.sort()
583 timings.sort()
584 min_val = timings[0]
584 min_val = timings[0]
585 display(b'best', min_val)
585 display(b'best', min_val)
586 if displayall:
586 if displayall:
587 max_val = timings[-1]
587 max_val = timings[-1]
588 display(b'max', max_val)
588 display(b'max', max_val)
589 avg = tuple([sum(x) / count for x in zip(*timings)])
589 avg = tuple([sum(x) / count for x in zip(*timings)])
590 display(b'avg', avg)
590 display(b'avg', avg)
591 median = timings[len(timings) // 2]
591 median = timings[len(timings) // 2]
592 display(b'median', median)
592 display(b'median', median)
593
593
594
594
595 # utilities for historical portability
595 # utilities for historical portability
596
596
597
597
598 def getint(ui, section, name, default):
598 def getint(ui, section, name, default):
599 # for "historical portability":
599 # for "historical portability":
600 # ui.configint has been available since 1.9 (or fa2b596db182)
600 # ui.configint has been available since 1.9 (or fa2b596db182)
601 v = ui.config(section, name, None)
601 v = ui.config(section, name, None)
602 if v is None:
602 if v is None:
603 return default
603 return default
604 try:
604 try:
605 return int(v)
605 return int(v)
606 except ValueError:
606 except ValueError:
607 raise error.ConfigError(
607 raise error.ConfigError(
608 b"%s.%s is not an integer ('%s')" % (section, name, v)
608 b"%s.%s is not an integer ('%s')" % (section, name, v)
609 )
609 )
610
610
611
611
612 def safeattrsetter(obj, name, ignoremissing=False):
612 def safeattrsetter(obj, name, ignoremissing=False):
613 """Ensure that 'obj' has 'name' attribute before subsequent setattr
613 """Ensure that 'obj' has 'name' attribute before subsequent setattr
614
614
615 This function aborts if 'obj' doesn't have the 'name' attribute
615 This function aborts if 'obj' doesn't have the 'name' attribute
616 at runtime. This avoids overlooking future removal of an attribute,
616 at runtime. This avoids overlooking future removal of an attribute,
617 which would break assumptions of the performance measurement.
617 which would break assumptions of the performance measurement.
618
618
619 This function returns an object used to (1) assign a new value to the
619 This function returns an object used to (1) assign a new value to the
620 attribute, and (2) restore its original value.
620 attribute, and (2) restore its original value.
621
621
622 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
622 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
623 an abort, and this function returns None. This is useful for
623 an abort, and this function returns None. This is useful for
624 examining an attribute that isn't guaranteed to exist in all Mercurial
624 examining an attribute that isn't guaranteed to exist in all Mercurial
625 versions.
625 versions.
626 """
626 """
627 if not util.safehasattr(obj, name):
627 if not util.safehasattr(obj, name):
628 if ignoremissing:
628 if ignoremissing:
629 return None
629 return None
630 raise error.Abort(
630 raise error.Abort(
631 (
631 (
632 b"missing attribute %s of %s might break assumption"
632 b"missing attribute %s of %s might break assumption"
633 b" of performance measurement"
633 b" of performance measurement"
634 )
634 )
635 % (name, obj)
635 % (name, obj)
636 )
636 )
637
637
638 origvalue = getattr(obj, _sysstr(name))
638 origvalue = getattr(obj, _sysstr(name))
639
639
640 class attrutil(object):
640 class attrutil(object):
641 def set(self, newvalue):
641 def set(self, newvalue):
642 setattr(obj, _sysstr(name), newvalue)
642 setattr(obj, _sysstr(name), newvalue)
643
643
644 def restore(self):
644 def restore(self):
645 setattr(obj, _sysstr(name), origvalue)
645 setattr(obj, _sysstr(name), origvalue)
646
646
647 return attrutil()
647 return attrutil()
648
648
649
649
650 # utilities to examine each internal API change
650 # utilities to examine each internal API change
651
651
652
652
653 def getbranchmapsubsettable():
653 def getbranchmapsubsettable():
654 # for "historical portability":
654 # for "historical portability":
655 # subsettable is defined in:
655 # subsettable is defined in:
656 # - branchmap since 2.9 (or 175c6fd8cacc)
656 # - branchmap since 2.9 (or 175c6fd8cacc)
657 # - repoview since 2.5 (or 59a9f18d4587)
657 # - repoview since 2.5 (or 59a9f18d4587)
658 # - repoviewutil since 5.0
658 # - repoviewutil since 5.0
659 for mod in (branchmap, repoview, repoviewutil):
659 for mod in (branchmap, repoview, repoviewutil):
660 subsettable = getattr(mod, 'subsettable', None)
660 subsettable = getattr(mod, 'subsettable', None)
661 if subsettable:
661 if subsettable:
662 return subsettable
662 return subsettable
663
663
664 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
664 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
665 # branchmap and repoview modules exist, but subsettable attribute
665 # branchmap and repoview modules exist, but subsettable attribute
666 # doesn't)
666 # doesn't)
667 raise error.Abort(
667 raise error.Abort(
668 b"perfbranchmap not available with this Mercurial",
668 b"perfbranchmap not available with this Mercurial",
669 hint=b"use 2.5 or later",
669 hint=b"use 2.5 or later",
670 )
670 )
671
671
672
672
673 def getsvfs(repo):
673 def getsvfs(repo):
674 """Return appropriate object to access files under .hg/store"""
674 """Return appropriate object to access files under .hg/store"""
675 # for "historical portability":
675 # for "historical portability":
676 # repo.svfs has been available since 2.3 (or 7034365089bf)
676 # repo.svfs has been available since 2.3 (or 7034365089bf)
677 svfs = getattr(repo, 'svfs', None)
677 svfs = getattr(repo, 'svfs', None)
678 if svfs:
678 if svfs:
679 return svfs
679 return svfs
680 else:
680 else:
681 return getattr(repo, 'sopener')
681 return getattr(repo, 'sopener')
682
682
683
683
684 def getvfs(repo):
684 def getvfs(repo):
685 """Return appropriate object to access files under .hg"""
685 """Return appropriate object to access files under .hg"""
686 # for "historical portability":
686 # for "historical portability":
687 # repo.vfs has been available since 2.3 (or 7034365089bf)
687 # repo.vfs has been available since 2.3 (or 7034365089bf)
688 vfs = getattr(repo, 'vfs', None)
688 vfs = getattr(repo, 'vfs', None)
689 if vfs:
689 if vfs:
690 return vfs
690 return vfs
691 else:
691 else:
692 return getattr(repo, 'opener')
692 return getattr(repo, 'opener')
693
693
694
694
695 def repocleartagscachefunc(repo):
695 def repocleartagscachefunc(repo):
696 """Return the function to clear tags cache according to repo internal API"""
696 """Return the function to clear tags cache according to repo internal API"""
697 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
697 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
698 # in this case, setattr(repo, '_tagscache', None) or the like isn't the
698 # in this case, setattr(repo, '_tagscache', None) or the like isn't the
699 # correct way to clear the tags cache, because existing code paths
699 # correct way to clear the tags cache, because existing code paths
700 # expect _tagscache to be a structured object.
700 # expect _tagscache to be a structured object.
701 def clearcache():
701 def clearcache():
702 # _tagscache has been filteredpropertycache since 2.5 (or
702 # _tagscache has been filteredpropertycache since 2.5 (or
703 # 98c867ac1330), and delattr() can't work in such case
703 # 98c867ac1330), and delattr() can't work in such case
704 if '_tagscache' in vars(repo):
704 if '_tagscache' in vars(repo):
705 del repo.__dict__['_tagscache']
705 del repo.__dict__['_tagscache']
706
706
707 return clearcache
707 return clearcache
708
708
709 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
709 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
710 if repotags: # since 1.4 (or 5614a628d173)
710 if repotags: # since 1.4 (or 5614a628d173)
711 return lambda: repotags.set(None)
711 return lambda: repotags.set(None)
712
712
713 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
713 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
714 if repotagscache: # since 0.6 (or d7df759d0e97)
714 if repotagscache: # since 0.6 (or d7df759d0e97)
715 return lambda: repotagscache.set(None)
715 return lambda: repotagscache.set(None)
716
716
717 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
717 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
718 # this point, but it isn't so problematic, because:
718 # this point, but it isn't so problematic, because:
719 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
719 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
720 # in perftags() causes failure soon
720 # in perftags() causes failure soon
721 # - perf.py itself has been available since 1.1 (or eb240755386d)
721 # - perf.py itself has been available since 1.1 (or eb240755386d)
722 raise error.Abort(b"tags API of this hg command is unknown")
722 raise error.Abort(b"tags API of this hg command is unknown")
723
723
724
724
725 # utilities to clear cache
725 # utilities to clear cache
726
726
727
727
728 def clearfilecache(obj, attrname):
728 def clearfilecache(obj, attrname):
729 unfiltered = getattr(obj, 'unfiltered', None)
729 unfiltered = getattr(obj, 'unfiltered', None)
730 if unfiltered is not None:
730 if unfiltered is not None:
731 obj = obj.unfiltered()
731 obj = obj.unfiltered()
732 if attrname in vars(obj):
732 if attrname in vars(obj):
733 delattr(obj, attrname)
733 delattr(obj, attrname)
734 obj._filecache.pop(attrname, None)
734 obj._filecache.pop(attrname, None)
735
735
736
736
737 def clearchangelog(repo):
737 def clearchangelog(repo):
738 if repo is not repo.unfiltered():
738 if repo is not repo.unfiltered():
739 object.__setattr__(repo, '_clcachekey', None)
739 object.__setattr__(repo, '_clcachekey', None)
740 object.__setattr__(repo, '_clcache', None)
740 object.__setattr__(repo, '_clcache', None)
741 clearfilecache(repo.unfiltered(), 'changelog')
741 clearfilecache(repo.unfiltered(), 'changelog')
742
742
743
743
744 # perf commands
744 # perf commands
745
745
746
746
747 @command(b'perf--walk', formatteropts)
747 @command(b'perf::walk|perfwalk', formatteropts)
748 def perfwalk(ui, repo, *pats, **opts):
748 def perfwalk(ui, repo, *pats, **opts):
749 opts = _byteskwargs(opts)
749 opts = _byteskwargs(opts)
750 timer, fm = gettimer(ui, opts)
750 timer, fm = gettimer(ui, opts)
751 m = scmutil.match(repo[None], pats, {})
751 m = scmutil.match(repo[None], pats, {})
752 timer(
752 timer(
753 lambda: len(
753 lambda: len(
754 list(
754 list(
755 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
755 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
756 )
756 )
757 )
757 )
758 )
758 )
759 fm.end()
759 fm.end()
760
760
761
761
762 @command(b'perf--annotate', formatteropts)
762 @command(b'perf::annotate|perfannotate', formatteropts)
763 def perfannotate(ui, repo, f, **opts):
763 def perfannotate(ui, repo, f, **opts):
764 opts = _byteskwargs(opts)
764 opts = _byteskwargs(opts)
765 timer, fm = gettimer(ui, opts)
765 timer, fm = gettimer(ui, opts)
766 fc = repo[b'.'][f]
766 fc = repo[b'.'][f]
767 timer(lambda: len(fc.annotate(True)))
767 timer(lambda: len(fc.annotate(True)))
768 fm.end()
768 fm.end()
769
769
770
770
771 @command(
771 @command(
772 b'perf--status',
772 b'perf::status|perfstatus',
773 [
773 [
774 (b'u', b'unknown', False, b'ask status to look for unknown files'),
774 (b'u', b'unknown', False, b'ask status to look for unknown files'),
775 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
775 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
776 ]
776 ]
777 + formatteropts,
777 + formatteropts,
778 )
778 )
779 def perfstatus(ui, repo, **opts):
779 def perfstatus(ui, repo, **opts):
780 """benchmark the performance of a single status call
780 """benchmark the performance of a single status call
781
781
782 The repository data are preserved between each call.
782 The repository data are preserved between each call.
783
783
784 By default, only the status of tracked files is requested. If
784 By default, only the status of tracked files is requested. If
785 `--unknown` is passed, the "unknown" files are also included.
785 `--unknown` is passed, the "unknown" files are also included.
786 """
786 """
787 opts = _byteskwargs(opts)
787 opts = _byteskwargs(opts)
788 # m = match.always(repo.root, repo.getcwd())
788 # m = match.always(repo.root, repo.getcwd())
789 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
789 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
790 # False))))
790 # False))))
791 timer, fm = gettimer(ui, opts)
791 timer, fm = gettimer(ui, opts)
792 if opts[b'dirstate']:
792 if opts[b'dirstate']:
793 dirstate = repo.dirstate
793 dirstate = repo.dirstate
794 m = scmutil.matchall(repo)
794 m = scmutil.matchall(repo)
795 unknown = opts[b'unknown']
795 unknown = opts[b'unknown']
796
796
797 def status_dirstate():
797 def status_dirstate():
798 s = dirstate.status(
798 s = dirstate.status(
799 m, subrepos=[], ignored=False, clean=False, unknown=unknown
799 m, subrepos=[], ignored=False, clean=False, unknown=unknown
800 )
800 )
801 sum(map(bool, s))
801 sum(map(bool, s))
802
802
803 timer(status_dirstate)
803 timer(status_dirstate)
804 else:
804 else:
805 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
805 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
806 fm.end()
806 fm.end()
807
807
808
808
809 @command(b'perf--addremove', formatteropts)
809 @command(b'perf::addremove|perfaddremove', formatteropts)
810 def perfaddremove(ui, repo, **opts):
810 def perfaddremove(ui, repo, **opts):
811 opts = _byteskwargs(opts)
811 opts = _byteskwargs(opts)
812 timer, fm = gettimer(ui, opts)
812 timer, fm = gettimer(ui, opts)
813 try:
813 try:
814 oldquiet = repo.ui.quiet
814 oldquiet = repo.ui.quiet
815 repo.ui.quiet = True
815 repo.ui.quiet = True
816 matcher = scmutil.match(repo[None])
816 matcher = scmutil.match(repo[None])
817 opts[b'dry_run'] = True
817 opts[b'dry_run'] = True
818 if 'uipathfn' in getargspec(scmutil.addremove).args:
818 if 'uipathfn' in getargspec(scmutil.addremove).args:
819 uipathfn = scmutil.getuipathfn(repo)
819 uipathfn = scmutil.getuipathfn(repo)
820 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
820 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
821 else:
821 else:
822 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
822 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
823 finally:
823 finally:
824 repo.ui.quiet = oldquiet
824 repo.ui.quiet = oldquiet
825 fm.end()
825 fm.end()
826
826
827
827
828 def clearcaches(cl):
828 def clearcaches(cl):
829 # behave somewhat consistently across internal API changes
829 # behave somewhat consistently across internal API changes
830 if util.safehasattr(cl, b'clearcaches'):
830 if util.safehasattr(cl, b'clearcaches'):
831 cl.clearcaches()
831 cl.clearcaches()
832 elif util.safehasattr(cl, b'_nodecache'):
832 elif util.safehasattr(cl, b'_nodecache'):
833 # <= hg-5.2
833 # <= hg-5.2
834 from mercurial.node import nullid, nullrev
834 from mercurial.node import nullid, nullrev
835
835
836 cl._nodecache = {nullid: nullrev}
836 cl._nodecache = {nullid: nullrev}
837 cl._nodepos = None
837 cl._nodepos = None
838
838
839
839
840 @command(b'perf--heads', formatteropts)
840 @command(b'perf::heads|perfheads', formatteropts)
841 def perfheads(ui, repo, **opts):
841 def perfheads(ui, repo, **opts):
842 """benchmark the computation of a changelog heads"""
842 """benchmark the computation of a changelog heads"""
843 opts = _byteskwargs(opts)
843 opts = _byteskwargs(opts)
844 timer, fm = gettimer(ui, opts)
844 timer, fm = gettimer(ui, opts)
845 cl = repo.changelog
845 cl = repo.changelog
846
846
847 def s():
847 def s():
848 clearcaches(cl)
848 clearcaches(cl)
849
849
850 def d():
850 def d():
851 len(cl.headrevs())
851 len(cl.headrevs())
852
852
853 timer(d, setup=s)
853 timer(d, setup=s)
854 fm.end()
854 fm.end()
855
855
856
856
857 @command(
857 @command(
858 b'perf--tags',
858 b'perf::tags|perftags',
859 formatteropts
859 formatteropts
860 + [
860 + [
861 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
861 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
862 ],
862 ],
863 )
863 )
864 def perftags(ui, repo, **opts):
864 def perftags(ui, repo, **opts):
865 opts = _byteskwargs(opts)
865 opts = _byteskwargs(opts)
866 timer, fm = gettimer(ui, opts)
866 timer, fm = gettimer(ui, opts)
867 repocleartagscache = repocleartagscachefunc(repo)
867 repocleartagscache = repocleartagscachefunc(repo)
868 clearrevlogs = opts[b'clear_revlogs']
868 clearrevlogs = opts[b'clear_revlogs']
869
869
870 def s():
870 def s():
871 if clearrevlogs:
871 if clearrevlogs:
872 clearchangelog(repo)
872 clearchangelog(repo)
873 clearfilecache(repo.unfiltered(), 'manifest')
873 clearfilecache(repo.unfiltered(), 'manifest')
874 repocleartagscache()
874 repocleartagscache()
875
875
876 def t():
876 def t():
877 return len(repo.tags())
877 return len(repo.tags())
878
878
879 timer(t, setup=s)
879 timer(t, setup=s)
880 fm.end()
880 fm.end()
881
881
882
882
883 @command(b'perf--ancestors', formatteropts)
883 @command(b'perf::ancestors|perfancestors', formatteropts)
884 def perfancestors(ui, repo, **opts):
884 def perfancestors(ui, repo, **opts):
885 opts = _byteskwargs(opts)
885 opts = _byteskwargs(opts)
886 timer, fm = gettimer(ui, opts)
886 timer, fm = gettimer(ui, opts)
887 heads = repo.changelog.headrevs()
887 heads = repo.changelog.headrevs()
888
888
889 def d():
889 def d():
890 for a in repo.changelog.ancestors(heads):
890 for a in repo.changelog.ancestors(heads):
891 pass
891 pass
892
892
893 timer(d)
893 timer(d)
894 fm.end()
894 fm.end()
895
895
896
896
897 @command(b'perf--ancestorset', formatteropts)
897 @command(b'perf::ancestorset|perfancestorset', formatteropts)
898 def perfancestorset(ui, repo, revset, **opts):
898 def perfancestorset(ui, repo, revset, **opts):
899 opts = _byteskwargs(opts)
899 opts = _byteskwargs(opts)
900 timer, fm = gettimer(ui, opts)
900 timer, fm = gettimer(ui, opts)
901 revs = repo.revs(revset)
901 revs = repo.revs(revset)
902 heads = repo.changelog.headrevs()
902 heads = repo.changelog.headrevs()
903
903
904 def d():
904 def d():
905 s = repo.changelog.ancestors(heads)
905 s = repo.changelog.ancestors(heads)
906 for rev in revs:
906 for rev in revs:
907 rev in s
907 rev in s
908
908
909 timer(d)
909 timer(d)
910 fm.end()
910 fm.end()
911
911
912
912
913 @command(b'perf--discovery', formatteropts, b'PATH')
913 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
914 def perfdiscovery(ui, repo, path, **opts):
914 def perfdiscovery(ui, repo, path, **opts):
915 """benchmark discovery between local repo and the peer at given path"""
915 """benchmark discovery between local repo and the peer at given path"""
916 repos = [repo, None]
916 repos = [repo, None]
917 timer, fm = gettimer(ui, opts)
917 timer, fm = gettimer(ui, opts)
918 path = ui.expandpath(path)
918 path = ui.expandpath(path)
919
919
920 def s():
920 def s():
921 repos[1] = hg.peer(ui, opts, path)
921 repos[1] = hg.peer(ui, opts, path)
922
922
923 def d():
923 def d():
924 setdiscovery.findcommonheads(ui, *repos)
924 setdiscovery.findcommonheads(ui, *repos)
925
925
926 timer(d, setup=s)
926 timer(d, setup=s)
927 fm.end()
927 fm.end()
928
928
929
929
930 @command(
930 @command(
931 b'perf--bookmarks',
931 b'perf::bookmarks|perfbookmarks',
932 formatteropts
932 formatteropts
933 + [
933 + [
934 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
934 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
935 ],
935 ],
936 )
936 )
937 def perfbookmarks(ui, repo, **opts):
937 def perfbookmarks(ui, repo, **opts):
938 """benchmark parsing bookmarks from disk to memory"""
938 """benchmark parsing bookmarks from disk to memory"""
939 opts = _byteskwargs(opts)
939 opts = _byteskwargs(opts)
940 timer, fm = gettimer(ui, opts)
940 timer, fm = gettimer(ui, opts)
941
941
942 clearrevlogs = opts[b'clear_revlogs']
942 clearrevlogs = opts[b'clear_revlogs']
943
943
944 def s():
944 def s():
945 if clearrevlogs:
945 if clearrevlogs:
946 clearchangelog(repo)
946 clearchangelog(repo)
947 clearfilecache(repo, b'_bookmarks')
947 clearfilecache(repo, b'_bookmarks')
948
948
949 def d():
949 def d():
950 repo._bookmarks
950 repo._bookmarks
951
951
952 timer(d, setup=s)
952 timer(d, setup=s)
953 fm.end()
953 fm.end()
954
954
955
955
956 @command(b'perf--bundleread', formatteropts, b'BUNDLE')
956 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
957 def perfbundleread(ui, repo, bundlepath, **opts):
957 def perfbundleread(ui, repo, bundlepath, **opts):
958 """Benchmark reading of bundle files.
958 """Benchmark reading of bundle files.
959
959
960 This command is meant to isolate the I/O part of bundle reading as
960 This command is meant to isolate the I/O part of bundle reading as
961 much as possible.
961 much as possible.
962 """
962 """
963 from mercurial import (
963 from mercurial import (
964 bundle2,
964 bundle2,
965 exchange,
965 exchange,
966 streamclone,
966 streamclone,
967 )
967 )
968
968
969 opts = _byteskwargs(opts)
969 opts = _byteskwargs(opts)
970
970
971 def makebench(fn):
971 def makebench(fn):
972 def run():
972 def run():
973 with open(bundlepath, b'rb') as fh:
973 with open(bundlepath, b'rb') as fh:
974 bundle = exchange.readbundle(ui, fh, bundlepath)
974 bundle = exchange.readbundle(ui, fh, bundlepath)
975 fn(bundle)
975 fn(bundle)
976
976
977 return run
977 return run
978
978
979 def makereadnbytes(size):
979 def makereadnbytes(size):
980 def run():
980 def run():
981 with open(bundlepath, b'rb') as fh:
981 with open(bundlepath, b'rb') as fh:
982 bundle = exchange.readbundle(ui, fh, bundlepath)
982 bundle = exchange.readbundle(ui, fh, bundlepath)
983 while bundle.read(size):
983 while bundle.read(size):
984 pass
984 pass
985
985
986 return run
986 return run
987
987
988 def makestdioread(size):
988 def makestdioread(size):
989 def run():
989 def run():
990 with open(bundlepath, b'rb') as fh:
990 with open(bundlepath, b'rb') as fh:
991 while fh.read(size):
991 while fh.read(size):
992 pass
992 pass
993
993
994 return run
994 return run
995
995
996 # bundle1
996 # bundle1
997
997
998 def deltaiter(bundle):
998 def deltaiter(bundle):
999 for delta in bundle.deltaiter():
999 for delta in bundle.deltaiter():
1000 pass
1000 pass
1001
1001
1002 def iterchunks(bundle):
1002 def iterchunks(bundle):
1003 for chunk in bundle.getchunks():
1003 for chunk in bundle.getchunks():
1004 pass
1004 pass
1005
1005
1006 # bundle2
1006 # bundle2
1007
1007
1008 def forwardchunks(bundle):
1008 def forwardchunks(bundle):
1009 for chunk in bundle._forwardchunks():
1009 for chunk in bundle._forwardchunks():
1010 pass
1010 pass
1011
1011
1012 def iterparts(bundle):
1012 def iterparts(bundle):
1013 for part in bundle.iterparts():
1013 for part in bundle.iterparts():
1014 pass
1014 pass
1015
1015
1016 def iterpartsseekable(bundle):
1016 def iterpartsseekable(bundle):
1017 for part in bundle.iterparts(seekable=True):
1017 for part in bundle.iterparts(seekable=True):
1018 pass
1018 pass
1019
1019
1020 def seek(bundle):
1020 def seek(bundle):
1021 for part in bundle.iterparts(seekable=True):
1021 for part in bundle.iterparts(seekable=True):
1022 part.seek(0, os.SEEK_END)
1022 part.seek(0, os.SEEK_END)
1023
1023
1024 def makepartreadnbytes(size):
1024 def makepartreadnbytes(size):
1025 def run():
1025 def run():
1026 with open(bundlepath, b'rb') as fh:
1026 with open(bundlepath, b'rb') as fh:
1027 bundle = exchange.readbundle(ui, fh, bundlepath)
1027 bundle = exchange.readbundle(ui, fh, bundlepath)
1028 for part in bundle.iterparts():
1028 for part in bundle.iterparts():
1029 while part.read(size):
1029 while part.read(size):
1030 pass
1030 pass
1031
1031
1032 return run
1032 return run
1033
1033
1034 benches = [
1034 benches = [
1035 (makestdioread(8192), b'read(8k)'),
1035 (makestdioread(8192), b'read(8k)'),
1036 (makestdioread(16384), b'read(16k)'),
1036 (makestdioread(16384), b'read(16k)'),
1037 (makestdioread(32768), b'read(32k)'),
1037 (makestdioread(32768), b'read(32k)'),
1038 (makestdioread(131072), b'read(128k)'),
1038 (makestdioread(131072), b'read(128k)'),
1039 ]
1039 ]
1040
1040
1041 with open(bundlepath, b'rb') as fh:
1041 with open(bundlepath, b'rb') as fh:
1042 bundle = exchange.readbundle(ui, fh, bundlepath)
1042 bundle = exchange.readbundle(ui, fh, bundlepath)
1043
1043
1044 if isinstance(bundle, changegroup.cg1unpacker):
1044 if isinstance(bundle, changegroup.cg1unpacker):
1045 benches.extend(
1045 benches.extend(
1046 [
1046 [
1047 (makebench(deltaiter), b'cg1 deltaiter()'),
1047 (makebench(deltaiter), b'cg1 deltaiter()'),
1048 (makebench(iterchunks), b'cg1 getchunks()'),
1048 (makebench(iterchunks), b'cg1 getchunks()'),
1049 (makereadnbytes(8192), b'cg1 read(8k)'),
1049 (makereadnbytes(8192), b'cg1 read(8k)'),
1050 (makereadnbytes(16384), b'cg1 read(16k)'),
1050 (makereadnbytes(16384), b'cg1 read(16k)'),
1051 (makereadnbytes(32768), b'cg1 read(32k)'),
1051 (makereadnbytes(32768), b'cg1 read(32k)'),
1052 (makereadnbytes(131072), b'cg1 read(128k)'),
1052 (makereadnbytes(131072), b'cg1 read(128k)'),
1053 ]
1053 ]
1054 )
1054 )
1055 elif isinstance(bundle, bundle2.unbundle20):
1055 elif isinstance(bundle, bundle2.unbundle20):
1056 benches.extend(
1056 benches.extend(
1057 [
1057 [
1058 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1058 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1059 (makebench(iterparts), b'bundle2 iterparts()'),
1059 (makebench(iterparts), b'bundle2 iterparts()'),
1060 (
1060 (
1061 makebench(iterpartsseekable),
1061 makebench(iterpartsseekable),
1062 b'bundle2 iterparts() seekable',
1062 b'bundle2 iterparts() seekable',
1063 ),
1063 ),
1064 (makebench(seek), b'bundle2 part seek()'),
1064 (makebench(seek), b'bundle2 part seek()'),
1065 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1065 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1066 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1066 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1067 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1067 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1068 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1068 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1069 ]
1069 ]
1070 )
1070 )
1071 elif isinstance(bundle, streamclone.streamcloneapplier):
1071 elif isinstance(bundle, streamclone.streamcloneapplier):
1072 raise error.Abort(b'stream clone bundles not supported')
1072 raise error.Abort(b'stream clone bundles not supported')
1073 else:
1073 else:
1074 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1074 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1075
1075
1076 for fn, title in benches:
1076 for fn, title in benches:
1077 timer, fm = gettimer(ui, opts)
1077 timer, fm = gettimer(ui, opts)
1078 timer(fn, title=title)
1078 timer(fn, title=title)
1079 fm.end()
1079 fm.end()
1080
1080
1081
1081
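The `make*` helpers above (makestdioread, makereadnbytes, makepartreadnbytes) all follow the same factory pattern: the chunk size is bound inside a dedicated closure so that each entry in `benches` is a self-contained, zero-argument callable the timer can invoke repeatedly. A minimal, standalone sketch of that pattern (not part of perf.py; the payload and sizes are illustrative):

import io

def makereader(data, size):
    # Bind `size` inside a dedicated closure so every benchmark entry is a
    # self-contained, zero-argument callable, as the make* helpers do above.
    def run():
        fh = io.BytesIO(data)
        while fh.read(size):
            pass

    return run

payload = b'x' * (1024 * 1024)
benches = [
    (makereader(payload, size), 'read(%dk)' % (size // 1024))
    for size in (8192, 32768, 131072)
]
for fn, title in benches:
    fn()  # a real harness would time this call, as timer(fn, title=title) does
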
1082 @command(
1082 @command(
1083 b'perf--changegroupchangelog',
1083 b'perf::changegroupchangelog|perfchangegroupchangelog',
1084 formatteropts
1084 formatteropts
1085 + [
1085 + [
1086 (b'', b'cgversion', b'02', b'changegroup version'),
1086 (b'', b'cgversion', b'02', b'changegroup version'),
1087 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1087 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1088 ],
1088 ],
1089 )
1089 )
1090 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1090 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1091 """Benchmark producing a changelog group for a changegroup.
1091 """Benchmark producing a changelog group for a changegroup.
1092
1092
1093 This measures the time spent processing the changelog during a
1093 This measures the time spent processing the changelog during a
1094 bundle operation. This occurs during `hg bundle` and on a server
1094 bundle operation. This occurs during `hg bundle` and on a server
1095 processing a `getbundle` wire protocol request (handles clones
1095 processing a `getbundle` wire protocol request (handles clones
1096 and pull requests).
1096 and pull requests).
1097
1097
1098 By default, all revisions are added to the changegroup.
1098 By default, all revisions are added to the changegroup.
1099 """
1099 """
1100 opts = _byteskwargs(opts)
1100 opts = _byteskwargs(opts)
1101 cl = repo.changelog
1101 cl = repo.changelog
1102 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1102 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1103 bundler = changegroup.getbundler(cgversion, repo)
1103 bundler = changegroup.getbundler(cgversion, repo)
1104
1104
1105 def d():
1105 def d():
1106 state, chunks = bundler._generatechangelog(cl, nodes)
1106 state, chunks = bundler._generatechangelog(cl, nodes)
1107 for chunk in chunks:
1107 for chunk in chunks:
1108 pass
1108 pass
1109
1109
1110 timer, fm = gettimer(ui, opts)
1110 timer, fm = gettimer(ui, opts)
1111
1111
1112 # Terminal printing can interfere with timing. So disable it.
1112 # Terminal printing can interfere with timing. So disable it.
1113 with ui.configoverride({(b'progress', b'disable'): True}):
1113 with ui.configoverride({(b'progress', b'disable'): True}):
1114 timer(d)
1114 timer(d)
1115
1115
1116 fm.end()
1116 fm.end()
1117
1117
1118
1118
1119 @command(b'perf--dirs', formatteropts)
1119 @command(b'perf::dirs|perfdirs', formatteropts)
1120 def perfdirs(ui, repo, **opts):
1120 def perfdirs(ui, repo, **opts):
1121 opts = _byteskwargs(opts)
1121 opts = _byteskwargs(opts)
1122 timer, fm = gettimer(ui, opts)
1122 timer, fm = gettimer(ui, opts)
1123 dirstate = repo.dirstate
1123 dirstate = repo.dirstate
1124 b'a' in dirstate
1124 b'a' in dirstate
1125
1125
1126 def d():
1126 def d():
1127 dirstate.hasdir(b'a')
1127 dirstate.hasdir(b'a')
1128 del dirstate._map._dirs
1128 del dirstate._map._dirs
1129
1129
1130 timer(d)
1130 timer(d)
1131 fm.end()
1131 fm.end()
1132
1132
1133
1133
1134 @command(
1134 @command(
1135 b'perf--dirstate',
1135 b'perf::dirstate|perfdirstate',
1136 [
1136 [
1137 (
1137 (
1138 b'',
1138 b'',
1139 b'iteration',
1139 b'iteration',
1140 None,
1140 None,
1141 b'benchmark a full iteration for the dirstate',
1141 b'benchmark a full iteration for the dirstate',
1142 ),
1142 ),
1143 (
1143 (
1144 b'',
1144 b'',
1145 b'contains',
1145 b'contains',
1146 None,
1146 None,
1147 b'benchmark a large amount of `nf in dirstate` calls',
1147 b'benchmark a large amount of `nf in dirstate` calls',
1148 ),
1148 ),
1149 ]
1149 ]
1150 + formatteropts,
1150 + formatteropts,
1151 )
1151 )
1152 def perfdirstate(ui, repo, **opts):
1152 def perfdirstate(ui, repo, **opts):
1153 """benchmap the time of various distate operations
1153 """benchmap the time of various distate operations
1154
1154
1155 By default benchmark the time necessary to load a dirstate from scratch.
1155 By default benchmark the time necessary to load a dirstate from scratch.
1156 The dirstate is loaded to the point where a "contains" request can be
1156 The dirstate is loaded to the point where a "contains" request can be
1157 answered.
1157 answered.
1158 """
1158 """
1159 opts = _byteskwargs(opts)
1159 opts = _byteskwargs(opts)
1160 timer, fm = gettimer(ui, opts)
1160 timer, fm = gettimer(ui, opts)
1161 b"a" in repo.dirstate
1161 b"a" in repo.dirstate
1162
1162
1163 if opts[b'iteration'] and opts[b'contains']:
1163 if opts[b'iteration'] and opts[b'contains']:
1164 msg = b'only specify one of --iteration or --contains'
1164 msg = b'only specify one of --iteration or --contains'
1165 raise error.Abort(msg)
1165 raise error.Abort(msg)
1166
1166
1167 if opts[b'iteration']:
1167 if opts[b'iteration']:
1168 setup = None
1168 setup = None
1169 dirstate = repo.dirstate
1169 dirstate = repo.dirstate
1170
1170
1171 def d():
1171 def d():
1172 for f in dirstate:
1172 for f in dirstate:
1173 pass
1173 pass
1174
1174
1175 elif opts[b'contains']:
1175 elif opts[b'contains']:
1176 setup = None
1176 setup = None
1177 dirstate = repo.dirstate
1177 dirstate = repo.dirstate
1178 allfiles = list(dirstate)
1178 allfiles = list(dirstate)
1179 # also add file paths that will be "missing" from the dirstate
1179 # also add file paths that will be "missing" from the dirstate
1180 allfiles.extend([f[::-1] for f in allfiles])
1180 allfiles.extend([f[::-1] for f in allfiles])
1181
1181
1182 def d():
1182 def d():
1183 for f in allfiles:
1183 for f in allfiles:
1184 f in dirstate
1184 f in dirstate
1185
1185
1186 else:
1186 else:
1187
1187
1188 def setup():
1188 def setup():
1189 repo.dirstate.invalidate()
1189 repo.dirstate.invalidate()
1190
1190
1191 def d():
1191 def d():
1192 b"a" in repo.dirstate
1192 b"a" in repo.dirstate
1193
1193
1194 timer(d, setup=setup)
1194 timer(d, setup=setup)
1195 fm.end()
1195 fm.end()
1196
1196
1197
1197
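perfdirstate, like most commands in this file, relies on timer() accepting an optional setup callable: setup drops the relevant cache outside the measured region, while d() performs only the work being benchmarked. A minimal stand-in (not perf.py's actual timer, just an illustration of the setup/run split):

import time

def bench(run, setup=None, rounds=5):
    # Minimal stand-in for perf.py's timer(): the setup callable is executed
    # before each round but excluded from the measured interval.
    best = float('inf')
    for _ in range(rounds):
        if setup is not None:
            setup()
        start = time.perf_counter()
        run()
        best = min(best, time.perf_counter() - start)
    return best

cache = {}

def setup():
    cache.clear()  # stands in for repo.dirstate.invalidate()

def run():
    cache.setdefault('a', sum(range(100000)))  # stands in for b"a" in repo.dirstate

print('best of 5: %.6f s' % bench(run, setup=setup))
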
1198 @command(b'perf--dirstatedirs', formatteropts)
1198 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1199 def perfdirstatedirs(ui, repo, **opts):
1199 def perfdirstatedirs(ui, repo, **opts):
1200 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1200 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1201 opts = _byteskwargs(opts)
1201 opts = _byteskwargs(opts)
1202 timer, fm = gettimer(ui, opts)
1202 timer, fm = gettimer(ui, opts)
1203 repo.dirstate.hasdir(b"a")
1203 repo.dirstate.hasdir(b"a")
1204
1204
1205 def setup():
1205 def setup():
1206 del repo.dirstate._map._dirs
1206 del repo.dirstate._map._dirs
1207
1207
1208 def d():
1208 def d():
1209 repo.dirstate.hasdir(b"a")
1209 repo.dirstate.hasdir(b"a")
1210
1210
1211 timer(d, setup=setup)
1211 timer(d, setup=setup)
1212 fm.end()
1212 fm.end()
1213
1213
1214
1214
1215 @command(b'perf--dirstatefoldmap', formatteropts)
1215 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1216 def perfdirstatefoldmap(ui, repo, **opts):
1216 def perfdirstatefoldmap(ui, repo, **opts):
1217 """benchmap a `dirstate._map.filefoldmap.get()` request
1217 """benchmap a `dirstate._map.filefoldmap.get()` request
1218
1218
1219 The dirstate filefoldmap cache is dropped between every request.
1219 The dirstate filefoldmap cache is dropped between every request.
1220 """
1220 """
1221 opts = _byteskwargs(opts)
1221 opts = _byteskwargs(opts)
1222 timer, fm = gettimer(ui, opts)
1222 timer, fm = gettimer(ui, opts)
1223 dirstate = repo.dirstate
1223 dirstate = repo.dirstate
1224 dirstate._map.filefoldmap.get(b'a')
1224 dirstate._map.filefoldmap.get(b'a')
1225
1225
1226 def setup():
1226 def setup():
1227 del dirstate._map.filefoldmap
1227 del dirstate._map.filefoldmap
1228
1228
1229 def d():
1229 def d():
1230 dirstate._map.filefoldmap.get(b'a')
1230 dirstate._map.filefoldmap.get(b'a')
1231
1231
1232 timer(d, setup=setup)
1232 timer(d, setup=setup)
1233 fm.end()
1233 fm.end()
1234
1234
1235
1235
1236 @command(b'perf--dirfoldmap', formatteropts)
1236 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1237 def perfdirfoldmap(ui, repo, **opts):
1237 def perfdirfoldmap(ui, repo, **opts):
1238 """benchmap a `dirstate._map.dirfoldmap.get()` request
1238 """benchmap a `dirstate._map.dirfoldmap.get()` request
1239
1239
1240 The dirstate dirfoldmap cache is dropped between every request.
1240 The dirstate dirfoldmap cache is dropped between every request.
1241 """
1241 """
1242 opts = _byteskwargs(opts)
1242 opts = _byteskwargs(opts)
1243 timer, fm = gettimer(ui, opts)
1243 timer, fm = gettimer(ui, opts)
1244 dirstate = repo.dirstate
1244 dirstate = repo.dirstate
1245 dirstate._map.dirfoldmap.get(b'a')
1245 dirstate._map.dirfoldmap.get(b'a')
1246
1246
1247 def setup():
1247 def setup():
1248 del dirstate._map.dirfoldmap
1248 del dirstate._map.dirfoldmap
1249 del dirstate._map._dirs
1249 del dirstate._map._dirs
1250
1250
1251 def d():
1251 def d():
1252 dirstate._map.dirfoldmap.get(b'a')
1252 dirstate._map.dirfoldmap.get(b'a')
1253
1253
1254 timer(d, setup=setup)
1254 timer(d, setup=setup)
1255 fm.end()
1255 fm.end()
1256
1256
1257
1257
1258 @command(b'perf--dirstatewrite', formatteropts)
1258 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1259 def perfdirstatewrite(ui, repo, **opts):
1259 def perfdirstatewrite(ui, repo, **opts):
1260 """benchmap the time it take to write a dirstate on disk"""
1260 """benchmap the time it take to write a dirstate on disk"""
1261 opts = _byteskwargs(opts)
1261 opts = _byteskwargs(opts)
1262 timer, fm = gettimer(ui, opts)
1262 timer, fm = gettimer(ui, opts)
1263 ds = repo.dirstate
1263 ds = repo.dirstate
1264 b"a" in ds
1264 b"a" in ds
1265
1265
1266 def setup():
1266 def setup():
1267 ds._dirty = True
1267 ds._dirty = True
1268
1268
1269 def d():
1269 def d():
1270 ds.write(repo.currenttransaction())
1270 ds.write(repo.currenttransaction())
1271
1271
1272 timer(d, setup=setup)
1272 timer(d, setup=setup)
1273 fm.end()
1273 fm.end()
1274
1274
1275
1275
1276 def _getmergerevs(repo, opts):
1276 def _getmergerevs(repo, opts):
1277 """parse command argument to return rev involved in merge
1277 """parse command argument to return rev involved in merge
1278
1278
1279 input: options dictionary with `rev`, `from` and `base`
1279 input: options dictionary with `rev`, `from` and `base`
1280 output: (localctx, otherctx, basectx)
1280 output: (localctx, otherctx, basectx)
1281 """
1281 """
1282 if opts[b'from']:
1282 if opts[b'from']:
1283 fromrev = scmutil.revsingle(repo, opts[b'from'])
1283 fromrev = scmutil.revsingle(repo, opts[b'from'])
1284 wctx = repo[fromrev]
1284 wctx = repo[fromrev]
1285 else:
1285 else:
1286 wctx = repo[None]
1286 wctx = repo[None]
1287 # we don't want working dir files to be stat'd in the benchmark, so
1287 # we don't want working dir files to be stat'd in the benchmark, so
1288 # prime that cache
1288 # prime that cache
1289 wctx.dirty()
1289 wctx.dirty()
1290 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1290 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1291 if opts[b'base']:
1291 if opts[b'base']:
1292 fromrev = scmutil.revsingle(repo, opts[b'base'])
1292 fromrev = scmutil.revsingle(repo, opts[b'base'])
1293 ancestor = repo[fromrev]
1293 ancestor = repo[fromrev]
1294 else:
1294 else:
1295 ancestor = wctx.ancestor(rctx)
1295 ancestor = wctx.ancestor(rctx)
1296 return (wctx, rctx, ancestor)
1296 return (wctx, rctx, ancestor)
1297
1297
1298
1298
1299 @command(
1299 @command(
1300 b'perf--mergecalculate',
1300 b'perf::mergecalculate|perfmergecalculate',
1301 [
1301 [
1302 (b'r', b'rev', b'.', b'rev to merge against'),
1302 (b'r', b'rev', b'.', b'rev to merge against'),
1303 (b'', b'from', b'', b'rev to merge from'),
1303 (b'', b'from', b'', b'rev to merge from'),
1304 (b'', b'base', b'', b'the revision to use as base'),
1304 (b'', b'base', b'', b'the revision to use as base'),
1305 ]
1305 ]
1306 + formatteropts,
1306 + formatteropts,
1307 )
1307 )
1308 def perfmergecalculate(ui, repo, **opts):
1308 def perfmergecalculate(ui, repo, **opts):
1309 opts = _byteskwargs(opts)
1309 opts = _byteskwargs(opts)
1310 timer, fm = gettimer(ui, opts)
1310 timer, fm = gettimer(ui, opts)
1311
1311
1312 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1312 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1313
1313
1314 def d():
1314 def d():
1315 # acceptremote is True because we don't want prompts in the middle of
1315 # acceptremote is True because we don't want prompts in the middle of
1316 # our benchmark
1316 # our benchmark
1317 merge.calculateupdates(
1317 merge.calculateupdates(
1318 repo,
1318 repo,
1319 wctx,
1319 wctx,
1320 rctx,
1320 rctx,
1321 [ancestor],
1321 [ancestor],
1322 branchmerge=False,
1322 branchmerge=False,
1323 force=False,
1323 force=False,
1324 acceptremote=True,
1324 acceptremote=True,
1325 followcopies=True,
1325 followcopies=True,
1326 )
1326 )
1327
1327
1328 timer(d)
1328 timer(d)
1329 fm.end()
1329 fm.end()
1330
1330
1331
1331
1332 @command(
1332 @command(
1333 b'perf--mergecopies',
1333 b'perf::mergecopies|perfmergecopies',
1334 [
1334 [
1335 (b'r', b'rev', b'.', b'rev to merge against'),
1335 (b'r', b'rev', b'.', b'rev to merge against'),
1336 (b'', b'from', b'', b'rev to merge from'),
1336 (b'', b'from', b'', b'rev to merge from'),
1337 (b'', b'base', b'', b'the revision to use as base'),
1337 (b'', b'base', b'', b'the revision to use as base'),
1338 ]
1338 ]
1339 + formatteropts,
1339 + formatteropts,
1340 )
1340 )
1341 def perfmergecopies(ui, repo, **opts):
1341 def perfmergecopies(ui, repo, **opts):
1342 """measure runtime of `copies.mergecopies`"""
1342 """measure runtime of `copies.mergecopies`"""
1343 opts = _byteskwargs(opts)
1343 opts = _byteskwargs(opts)
1344 timer, fm = gettimer(ui, opts)
1344 timer, fm = gettimer(ui, opts)
1345 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1345 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1346
1346
1347 def d():
1347 def d():
1348 # acceptremote is True because we don't want prompts in the middle of
1348 # acceptremote is True because we don't want prompts in the middle of
1349 # our benchmark
1349 # our benchmark
1350 copies.mergecopies(repo, wctx, rctx, ancestor)
1350 copies.mergecopies(repo, wctx, rctx, ancestor)
1351
1351
1352 timer(d)
1352 timer(d)
1353 fm.end()
1353 fm.end()
1354
1354
1355
1355
1356 @command(b'perf--pathcopies', [], b"REV REV")
1356 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1357 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1357 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1358 """benchmark the copy tracing logic"""
1358 """benchmark the copy tracing logic"""
1359 opts = _byteskwargs(opts)
1359 opts = _byteskwargs(opts)
1360 timer, fm = gettimer(ui, opts)
1360 timer, fm = gettimer(ui, opts)
1361 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1361 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1362 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1362 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1363
1363
1364 def d():
1364 def d():
1365 copies.pathcopies(ctx1, ctx2)
1365 copies.pathcopies(ctx1, ctx2)
1366
1366
1367 timer(d)
1367 timer(d)
1368 fm.end()
1368 fm.end()
1369
1369
1370
1370
1371 @command(
1371 @command(
1372 b'perf--phases',
1372 b'perf::phases|perfphases',
1373 [
1373 [
1374 (b'', b'full', False, b'include file reading time too'),
1374 (b'', b'full', False, b'include file reading time too'),
1375 ],
1375 ],
1376 b"",
1376 b"",
1377 )
1377 )
1378 def perfphases(ui, repo, **opts):
1378 def perfphases(ui, repo, **opts):
1379 """benchmark phasesets computation"""
1379 """benchmark phasesets computation"""
1380 opts = _byteskwargs(opts)
1380 opts = _byteskwargs(opts)
1381 timer, fm = gettimer(ui, opts)
1381 timer, fm = gettimer(ui, opts)
1382 _phases = repo._phasecache
1382 _phases = repo._phasecache
1383 full = opts.get(b'full')
1383 full = opts.get(b'full')
1384
1384
1385 def d():
1385 def d():
1386 phases = _phases
1386 phases = _phases
1387 if full:
1387 if full:
1388 clearfilecache(repo, b'_phasecache')
1388 clearfilecache(repo, b'_phasecache')
1389 phases = repo._phasecache
1389 phases = repo._phasecache
1390 phases.invalidate()
1390 phases.invalidate()
1391 phases.loadphaserevs(repo)
1391 phases.loadphaserevs(repo)
1392
1392
1393 timer(d)
1393 timer(d)
1394 fm.end()
1394 fm.end()
1395
1395
1396
1396
1397 @command(b'perf--phasesremote', [], b"[DEST]")
1397 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1398 def perfphasesremote(ui, repo, dest=None, **opts):
1398 def perfphasesremote(ui, repo, dest=None, **opts):
1399 """benchmark time needed to analyse phases of the remote server"""
1399 """benchmark time needed to analyse phases of the remote server"""
1400 from mercurial.node import bin
1400 from mercurial.node import bin
1401 from mercurial import (
1401 from mercurial import (
1402 exchange,
1402 exchange,
1403 hg,
1403 hg,
1404 phases,
1404 phases,
1405 )
1405 )
1406
1406
1407 opts = _byteskwargs(opts)
1407 opts = _byteskwargs(opts)
1408 timer, fm = gettimer(ui, opts)
1408 timer, fm = gettimer(ui, opts)
1409
1409
1410 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1410 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1411 if not path:
1411 if not path:
1412 raise error.Abort(
1412 raise error.Abort(
1413 b'default repository not configured!',
1413 b'default repository not configured!',
1414 hint=b"see 'hg help config.paths'",
1414 hint=b"see 'hg help config.paths'",
1415 )
1415 )
1416 dest = path.pushloc or path.loc
1416 dest = path.pushloc or path.loc
1417 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1417 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1418 other = hg.peer(repo, opts, dest)
1418 other = hg.peer(repo, opts, dest)
1419
1419
1420 # easier to perform discovery through the operation
1420 # easier to perform discovery through the operation
1421 op = exchange.pushoperation(repo, other)
1421 op = exchange.pushoperation(repo, other)
1422 exchange._pushdiscoverychangeset(op)
1422 exchange._pushdiscoverychangeset(op)
1423
1423
1424 remotesubset = op.fallbackheads
1424 remotesubset = op.fallbackheads
1425
1425
1426 with other.commandexecutor() as e:
1426 with other.commandexecutor() as e:
1427 remotephases = e.callcommand(
1427 remotephases = e.callcommand(
1428 b'listkeys', {b'namespace': b'phases'}
1428 b'listkeys', {b'namespace': b'phases'}
1429 ).result()
1429 ).result()
1430 del other
1430 del other
1431 publishing = remotephases.get(b'publishing', False)
1431 publishing = remotephases.get(b'publishing', False)
1432 if publishing:
1432 if publishing:
1433 ui.statusnoi18n(b'publishing: yes\n')
1433 ui.statusnoi18n(b'publishing: yes\n')
1434 else:
1434 else:
1435 ui.statusnoi18n(b'publishing: no\n')
1435 ui.statusnoi18n(b'publishing: no\n')
1436
1436
1437 has_node = getattr(repo.changelog.index, 'has_node', None)
1437 has_node = getattr(repo.changelog.index, 'has_node', None)
1438 if has_node is None:
1438 if has_node is None:
1439 has_node = repo.changelog.nodemap.__contains__
1439 has_node = repo.changelog.nodemap.__contains__
1440 nonpublishroots = 0
1440 nonpublishroots = 0
1441 for nhex, phase in remotephases.iteritems():
1441 for nhex, phase in remotephases.iteritems():
1442 if nhex == b'publishing': # ignore data related to publish option
1442 if nhex == b'publishing': # ignore data related to publish option
1443 continue
1443 continue
1444 node = bin(nhex)
1444 node = bin(nhex)
1445 if has_node(node) and int(phase):
1445 if has_node(node) and int(phase):
1446 nonpublishroots += 1
1446 nonpublishroots += 1
1447 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1447 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1448 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1448 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1449
1449
1450 def d():
1450 def d():
1451 phases.remotephasessummary(repo, remotesubset, remotephases)
1451 phases.remotephasessummary(repo, remotesubset, remotephases)
1452
1452
1453 timer(d)
1453 timer(d)
1454 fm.end()
1454 fm.end()
1455
1455
1456
1456
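For context on the loop above: the `phases` listkeys namespace is returned as a dict mapping hexadecimal root nodes to their phase (as a string), plus a special `publishing` entry, and the command only counts roots that are known locally and non-public. A small sketch with made-up data:

# Made-up listkeys payload: hexadecimal root node -> phase string, plus the
# special 'publishing' entry handled separately above.
remotephases = {
    b'publishing': b'True',
    b'aa' * 20: b'1',  # draft root known locally -> counted
    b'bb' * 20: b'0',  # public root -> ignored by the count
}
known_locally = {b'aa' * 20}

nonpublishroots = 0
for nhex, phase in remotephases.items():
    if nhex == b'publishing':  # ignore data related to the publish option
        continue
    if nhex in known_locally and int(phase):
        nonpublishroots += 1
print(nonpublishroots)  # 1
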
1457 @command(
1457 @command(
1458 b'perf--manifest',
1458 b'perf::manifest|perfmanifest',
1459 [
1459 [
1460 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1460 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1461 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1461 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1462 ]
1462 ]
1463 + formatteropts,
1463 + formatteropts,
1464 b'REV|NODE',
1464 b'REV|NODE',
1465 )
1465 )
1466 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1466 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1467 """benchmark the time to read a manifest from disk and return a usable
1467 """benchmark the time to read a manifest from disk and return a usable
1468 dict-like object
1468 dict-like object
1469
1469
1470 Manifest caches are cleared before retrieval."""
1470 Manifest caches are cleared before retrieval."""
1471 opts = _byteskwargs(opts)
1471 opts = _byteskwargs(opts)
1472 timer, fm = gettimer(ui, opts)
1472 timer, fm = gettimer(ui, opts)
1473 if not manifest_rev:
1473 if not manifest_rev:
1474 ctx = scmutil.revsingle(repo, rev, rev)
1474 ctx = scmutil.revsingle(repo, rev, rev)
1475 t = ctx.manifestnode()
1475 t = ctx.manifestnode()
1476 else:
1476 else:
1477 from mercurial.node import bin
1477 from mercurial.node import bin
1478
1478
1479 if len(rev) == 40:
1479 if len(rev) == 40:
1480 t = bin(rev)
1480 t = bin(rev)
1481 else:
1481 else:
1482 try:
1482 try:
1483 rev = int(rev)
1483 rev = int(rev)
1484
1484
1485 if util.safehasattr(repo.manifestlog, b'getstorage'):
1485 if util.safehasattr(repo.manifestlog, b'getstorage'):
1486 t = repo.manifestlog.getstorage(b'').node(rev)
1486 t = repo.manifestlog.getstorage(b'').node(rev)
1487 else:
1487 else:
1488 t = repo.manifestlog._revlog.lookup(rev)
1488 t = repo.manifestlog._revlog.lookup(rev)
1489 except ValueError:
1489 except ValueError:
1490 raise error.Abort(
1490 raise error.Abort(
1491 b'manifest revision must be integer or full node'
1491 b'manifest revision must be integer or full node'
1492 )
1492 )
1493
1493
1494 def d():
1494 def d():
1495 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1495 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1496 repo.manifestlog[t].read()
1496 repo.manifestlog[t].read()
1497
1497
1498 timer(d)
1498 timer(d)
1499 fm.end()
1499 fm.end()
1500
1500
1501
1501
1502 @command(b'perf--changeset', formatteropts)
1502 @command(b'perf::changeset|perfchangeset', formatteropts)
1503 def perfchangeset(ui, repo, rev, **opts):
1503 def perfchangeset(ui, repo, rev, **opts):
1504 opts = _byteskwargs(opts)
1504 opts = _byteskwargs(opts)
1505 timer, fm = gettimer(ui, opts)
1505 timer, fm = gettimer(ui, opts)
1506 n = scmutil.revsingle(repo, rev).node()
1506 n = scmutil.revsingle(repo, rev).node()
1507
1507
1508 def d():
1508 def d():
1509 repo.changelog.read(n)
1509 repo.changelog.read(n)
1510 # repo.changelog._cache = None
1510 # repo.changelog._cache = None
1511
1511
1512 timer(d)
1512 timer(d)
1513 fm.end()
1513 fm.end()
1514
1514
1515
1515
1516 @command(b'perf--ignore', formatteropts)
1516 @command(b'perf::ignore|perfignore', formatteropts)
1517 def perfignore(ui, repo, **opts):
1517 def perfignore(ui, repo, **opts):
1518 """benchmark operation related to computing ignore"""
1518 """benchmark operation related to computing ignore"""
1519 opts = _byteskwargs(opts)
1519 opts = _byteskwargs(opts)
1520 timer, fm = gettimer(ui, opts)
1520 timer, fm = gettimer(ui, opts)
1521 dirstate = repo.dirstate
1521 dirstate = repo.dirstate
1522
1522
1523 def setupone():
1523 def setupone():
1524 dirstate.invalidate()
1524 dirstate.invalidate()
1525 clearfilecache(dirstate, b'_ignore')
1525 clearfilecache(dirstate, b'_ignore')
1526
1526
1527 def runone():
1527 def runone():
1528 dirstate._ignore
1528 dirstate._ignore
1529
1529
1530 timer(runone, setup=setupone, title=b"load")
1530 timer(runone, setup=setupone, title=b"load")
1531 fm.end()
1531 fm.end()
1532
1532
1533
1533
1534 @command(
1534 @command(
1535 b'perf--index',
1535 b'perf::index|perfindex',
1536 [
1536 [
1537 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1537 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1538 (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
1538 (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
1539 ]
1539 ]
1540 + formatteropts,
1540 + formatteropts,
1541 )
1541 )
1542 def perfindex(ui, repo, **opts):
1542 def perfindex(ui, repo, **opts):
1543 """benchmark index creation time followed by a lookup
1543 """benchmark index creation time followed by a lookup
1544
1544
1545 The default is to look `tip` up. Depending on the index implementation,
1545 The default is to look `tip` up. Depending on the index implementation,
1546 the revision looked up can matter. For example, an implementation
1546 the revision looked up can matter. For example, an implementation
1547 scanning the index will have a faster lookup time for `--rev tip` than for
1547 scanning the index will have a faster lookup time for `--rev tip` than for
1548 `--rev 0`. The number of looked up revisions and their order can also
1548 `--rev 0`. The number of looked up revisions and their order can also
1549 matter.
1549 matter.
1550
1550
1551 Examples of useful sets to test:
1551 Examples of useful sets to test:
1552
1552
1553 * tip
1553 * tip
1554 * 0
1554 * 0
1555 * -10:
1555 * -10:
1556 * :10
1556 * :10
1557 * -10: + :10
1557 * -10: + :10
1558 * :10: + -10:
1558 * :10: + -10:
1559 * -10000:
1559 * -10000:
1560 * -10000: + 0
1560 * -10000: + 0
1561
1561
1562 It is not currently possible to check for lookup of a missing node. For
1562 It is not currently possible to check for lookup of a missing node. For
1563 deeper lookup benchmarking, check out the `perfnodemap` command."""
1563 deeper lookup benchmarking, check out the `perfnodemap` command."""
1564 import mercurial.revlog
1564 import mercurial.revlog
1565
1565
1566 opts = _byteskwargs(opts)
1566 opts = _byteskwargs(opts)
1567 timer, fm = gettimer(ui, opts)
1567 timer, fm = gettimer(ui, opts)
1568 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1568 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1569 if opts[b'no_lookup']:
1569 if opts[b'no_lookup']:
1570 if opts['rev']:
1570 if opts['rev']:
1571 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1571 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1572 nodes = []
1572 nodes = []
1573 elif not opts[b'rev']:
1573 elif not opts[b'rev']:
1574 nodes = [repo[b"tip"].node()]
1574 nodes = [repo[b"tip"].node()]
1575 else:
1575 else:
1576 revs = scmutil.revrange(repo, opts[b'rev'])
1576 revs = scmutil.revrange(repo, opts[b'rev'])
1577 cl = repo.changelog
1577 cl = repo.changelog
1578 nodes = [cl.node(r) for r in revs]
1578 nodes = [cl.node(r) for r in revs]
1579
1579
1580 unfi = repo.unfiltered()
1580 unfi = repo.unfiltered()
1581 # find the filecache func directly
1581 # find the filecache func directly
1582 # This avoids polluting the benchmark with the filecache logic
1582 # This avoids polluting the benchmark with the filecache logic
1583 makecl = unfi.__class__.changelog.func
1583 makecl = unfi.__class__.changelog.func
1584
1584
1585 def setup():
1585 def setup():
1586 # probably not necessary, but for good measure
1586 # probably not necessary, but for good measure
1587 clearchangelog(unfi)
1587 clearchangelog(unfi)
1588
1588
1589 def d():
1589 def d():
1590 cl = makecl(unfi)
1590 cl = makecl(unfi)
1591 for n in nodes:
1591 for n in nodes:
1592 cl.rev(n)
1592 cl.rev(n)
1593
1593
1594 timer(d, setup=setup)
1594 timer(d, setup=setup)
1595 fm.end()
1595 fm.end()
1596
1596
1597
1597
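To illustrate the docstring's point that, for an implementation scanning the index, `--rev tip` is cheaper to look up than `--rev 0`: a toy lookup that scans from the most recent entry backwards shows the asymmetry. This is only an illustration, not how the revlog index is actually implemented:

import time

index = list(range(1000000))  # stand-in for index entries, newest last

def scan_lookup(target):
    # Toy lookup scanning from the most recent entry backwards; tip-like
    # targets are found immediately, rev 0 only after a full scan.
    for pos in range(len(index) - 1, -1, -1):
        if index[pos] == target:
            return pos
    raise LookupError(target)

for name, target in [('tip', len(index) - 1), ('rev 0', 0)]:
    start = time.perf_counter()
    scan_lookup(target)
    print('%s lookup: %.4f s' % (name, time.perf_counter() - start))
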
1598 @command(
1598 @command(
1599 b'perf--nodemap',
1599 b'perf::nodemap|perfnodemap',
1600 [
1600 [
1601 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1601 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1602 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1602 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1603 ]
1603 ]
1604 + formatteropts,
1604 + formatteropts,
1605 )
1605 )
1606 def perfnodemap(ui, repo, **opts):
1606 def perfnodemap(ui, repo, **opts):
1607 """benchmark the time necessary to look up revision from a cold nodemap
1607 """benchmark the time necessary to look up revision from a cold nodemap
1608
1608
1609 Depending on the implementation, the amount and order of revisions we look
1609 Depending on the implementation, the amount and order of revisions we look
1610 up can vary. Examples of useful sets to test:
1610 up can vary. Examples of useful sets to test:
1611 * tip
1611 * tip
1612 * 0
1612 * 0
1613 * -10:
1613 * -10:
1614 * :10
1614 * :10
1615 * -10: + :10
1615 * -10: + :10
1616 * :10: + -10:
1616 * :10: + -10:
1617 * -10000:
1617 * -10000:
1618 * -10000: + 0
1618 * -10000: + 0
1619
1619
1620 The command currently focuses on valid binary lookup. Benchmarking for
1620 The command currently focuses on valid binary lookup. Benchmarking for
1621 hexlookup, prefix lookup and missing lookup would also be valuable.
1621 hexlookup, prefix lookup and missing lookup would also be valuable.
1622 """
1622 """
1623 import mercurial.revlog
1623 import mercurial.revlog
1624
1624
1625 opts = _byteskwargs(opts)
1625 opts = _byteskwargs(opts)
1626 timer, fm = gettimer(ui, opts)
1626 timer, fm = gettimer(ui, opts)
1627 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1627 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1628
1628
1629 unfi = repo.unfiltered()
1629 unfi = repo.unfiltered()
1630 clearcaches = opts[b'clear_caches']
1630 clearcaches = opts[b'clear_caches']
1631 # find the filecache func directly
1631 # find the filecache func directly
1632 # This avoids polluting the benchmark with the filecache logic
1632 # This avoids polluting the benchmark with the filecache logic
1633 makecl = unfi.__class__.changelog.func
1633 makecl = unfi.__class__.changelog.func
1634 if not opts[b'rev']:
1634 if not opts[b'rev']:
1635 raise error.Abort(b'use --rev to specify revisions to look up')
1635 raise error.Abort(b'use --rev to specify revisions to look up')
1636 revs = scmutil.revrange(repo, opts[b'rev'])
1636 revs = scmutil.revrange(repo, opts[b'rev'])
1637 cl = repo.changelog
1637 cl = repo.changelog
1638 nodes = [cl.node(r) for r in revs]
1638 nodes = [cl.node(r) for r in revs]
1639
1639
1640 # use a list to pass reference to a nodemap from one closure to the next
1640 # use a list to pass reference to a nodemap from one closure to the next
1641 nodeget = [None]
1641 nodeget = [None]
1642
1642
1643 def setnodeget():
1643 def setnodeget():
1644 # probably not necessary, but for good measure
1644 # probably not necessary, but for good measure
1645 clearchangelog(unfi)
1645 clearchangelog(unfi)
1646 cl = makecl(unfi)
1646 cl = makecl(unfi)
1647 if util.safehasattr(cl.index, 'get_rev'):
1647 if util.safehasattr(cl.index, 'get_rev'):
1648 nodeget[0] = cl.index.get_rev
1648 nodeget[0] = cl.index.get_rev
1649 else:
1649 else:
1650 nodeget[0] = cl.nodemap.get
1650 nodeget[0] = cl.nodemap.get
1651
1651
1652 def d():
1652 def d():
1653 get = nodeget[0]
1653 get = nodeget[0]
1654 for n in nodes:
1654 for n in nodes:
1655 get(n)
1655 get(n)
1656
1656
1657 setup = None
1657 setup = None
1658 if clearcaches:
1658 if clearcaches:
1659
1659
1660 def setup():
1660 def setup():
1661 setnodeget()
1661 setnodeget()
1662
1662
1663 else:
1663 else:
1664 setnodeget()
1664 setnodeget()
1665 d() # prewarm the data structure
1665 d() # prewarm the data structure
1666 timer(d, setup=setup)
1666 timer(d, setup=setup)
1667 fm.end()
1667 fm.end()
1668
1668
1669
1669
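The `nodeget = [None]` idiom above uses a one-element list as a mutable cell so that `setnodeget()` can rebind the lookup function and `d()` can pick it up without any global state. A tiny standalone example of the same trick (Python 3 code could use `nonlocal` instead):

def make_counter():
    cell = [0]  # one-element list shared by both closures, like nodeget above

    def bump():
        cell[0] += 1

    def value():
        return cell[0]

    return bump, value

bump, value = make_counter()
bump()
bump()
print(value())  # 2
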
1670 @command(b'perf--startup', formatteropts)
1670 @command(b'perf::startup|perfstartup', formatteropts)
1671 def perfstartup(ui, repo, **opts):
1671 def perfstartup(ui, repo, **opts):
1672 opts = _byteskwargs(opts)
1672 opts = _byteskwargs(opts)
1673 timer, fm = gettimer(ui, opts)
1673 timer, fm = gettimer(ui, opts)
1674
1674
1675 def d():
1675 def d():
1676 if os.name != 'nt':
1676 if os.name != 'nt':
1677 os.system(
1677 os.system(
1678 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1678 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1679 )
1679 )
1680 else:
1680 else:
1681 os.environ['HGRCPATH'] = r' '
1681 os.environ['HGRCPATH'] = r' '
1682 os.system("%s version -q > NUL" % sys.argv[0])
1682 os.system("%s version -q > NUL" % sys.argv[0])
1683
1683
1684 timer(d)
1684 timer(d)
1685 fm.end()
1685 fm.end()
1686
1686
1687
1687
1688 @command(b'perf--parents', formatteropts)
1688 @command(b'perf::parents|perfparents', formatteropts)
1689 def perfparents(ui, repo, **opts):
1689 def perfparents(ui, repo, **opts):
1690 """benchmark the time necessary to fetch one changeset's parents.
1690 """benchmark the time necessary to fetch one changeset's parents.
1691
1691
1692 The fetch is done using the `node identifier`, traversing all object layers
1692 The fetch is done using the `node identifier`, traversing all object layers
1693 from the repository object. The first N revisions will be used for this
1693 from the repository object. The first N revisions will be used for this
1694 benchmark. N is controlled by the ``perf.parentscount`` config option
1694 benchmark. N is controlled by the ``perf.parentscount`` config option
1695 (default: 1000).
1695 (default: 1000).
1696 """
1696 """
1697 opts = _byteskwargs(opts)
1697 opts = _byteskwargs(opts)
1698 timer, fm = gettimer(ui, opts)
1698 timer, fm = gettimer(ui, opts)
1699 # control the number of commits perfparents iterates over
1699 # control the number of commits perfparents iterates over
1700 # experimental config: perf.parentscount
1700 # experimental config: perf.parentscount
1701 count = getint(ui, b"perf", b"parentscount", 1000)
1701 count = getint(ui, b"perf", b"parentscount", 1000)
1702 if len(repo.changelog) < count:
1702 if len(repo.changelog) < count:
1703 raise error.Abort(b"repo needs %d commits for this test" % count)
1703 raise error.Abort(b"repo needs %d commits for this test" % count)
1704 repo = repo.unfiltered()
1704 repo = repo.unfiltered()
1705 nl = [repo.changelog.node(i) for i in _xrange(count)]
1705 nl = [repo.changelog.node(i) for i in _xrange(count)]
1706
1706
1707 def d():
1707 def d():
1708 for n in nl:
1708 for n in nl:
1709 repo.changelog.parents(n)
1709 repo.changelog.parents(n)
1710
1710
1711 timer(d)
1711 timer(d)
1712 fm.end()
1712 fm.end()
1713
1713
1714
1714
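The `perf.parentscount` option mentioned in the docstring is read through getint() with a default of 1000. A rough stand-in for that lookup, with a made-up config dict instead of a real hgrc:

config = {('perf', 'parentscount'): '250'}  # made-up hgrc contents

def getint(section, name, default):
    # Return the configured integer, falling back to the default when the
    # option is unset, mirroring getint(ui, b"perf", b"parentscount", 1000).
    value = config.get((section, name))
    return default if value is None else int(value)

print(getint('perf', 'parentscount', 1000))  # 250 here, 1000 when unset
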
1715 @command(b'perf--ctxfiles', formatteropts)
1715 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1716 def perfctxfiles(ui, repo, x, **opts):
1716 def perfctxfiles(ui, repo, x, **opts):
1717 opts = _byteskwargs(opts)
1717 opts = _byteskwargs(opts)
1718 x = int(x)
1718 x = int(x)
1719 timer, fm = gettimer(ui, opts)
1719 timer, fm = gettimer(ui, opts)
1720
1720
1721 def d():
1721 def d():
1722 len(repo[x].files())
1722 len(repo[x].files())
1723
1723
1724 timer(d)
1724 timer(d)
1725 fm.end()
1725 fm.end()
1726
1726
1727
1727
1728 @command(b'perf--rawfiles', formatteropts)
1728 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1729 def perfrawfiles(ui, repo, x, **opts):
1729 def perfrawfiles(ui, repo, x, **opts):
1730 opts = _byteskwargs(opts)
1730 opts = _byteskwargs(opts)
1731 x = int(x)
1731 x = int(x)
1732 timer, fm = gettimer(ui, opts)
1732 timer, fm = gettimer(ui, opts)
1733 cl = repo.changelog
1733 cl = repo.changelog
1734
1734
1735 def d():
1735 def d():
1736 len(cl.read(x)[3])
1736 len(cl.read(x)[3])
1737
1737
1738 timer(d)
1738 timer(d)
1739 fm.end()
1739 fm.end()
1740
1740
1741
1741
1742 @command(b'perf--lookup', formatteropts)
1742 @command(b'perf::lookup|perflookup', formatteropts)
1743 def perflookup(ui, repo, rev, **opts):
1743 def perflookup(ui, repo, rev, **opts):
1744 opts = _byteskwargs(opts)
1744 opts = _byteskwargs(opts)
1745 timer, fm = gettimer(ui, opts)
1745 timer, fm = gettimer(ui, opts)
1746 timer(lambda: len(repo.lookup(rev)))
1746 timer(lambda: len(repo.lookup(rev)))
1747 fm.end()
1747 fm.end()
1748
1748
1749
1749
1750 @command(
1750 @command(
1751 b'perf--linelogedits',
1751 b'perf::linelogedits|perflinelogedits',
1752 [
1752 [
1753 (b'n', b'edits', 10000, b'number of edits'),
1753 (b'n', b'edits', 10000, b'number of edits'),
1754 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1754 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1755 ],
1755 ],
1756 norepo=True,
1756 norepo=True,
1757 )
1757 )
1758 def perflinelogedits(ui, **opts):
1758 def perflinelogedits(ui, **opts):
1759 from mercurial import linelog
1759 from mercurial import linelog
1760
1760
1761 opts = _byteskwargs(opts)
1761 opts = _byteskwargs(opts)
1762
1762
1763 edits = opts[b'edits']
1763 edits = opts[b'edits']
1764 maxhunklines = opts[b'max_hunk_lines']
1764 maxhunklines = opts[b'max_hunk_lines']
1765
1765
1766 maxb1 = 100000
1766 maxb1 = 100000
1767 random.seed(0)
1767 random.seed(0)
1768 randint = random.randint
1768 randint = random.randint
1769 currentlines = 0
1769 currentlines = 0
1770 arglist = []
1770 arglist = []
1771 for rev in _xrange(edits):
1771 for rev in _xrange(edits):
1772 a1 = randint(0, currentlines)
1772 a1 = randint(0, currentlines)
1773 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1773 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1774 b1 = randint(0, maxb1)
1774 b1 = randint(0, maxb1)
1775 b2 = randint(b1, b1 + maxhunklines)
1775 b2 = randint(b1, b1 + maxhunklines)
1776 currentlines += (b2 - b1) - (a2 - a1)
1776 currentlines += (b2 - b1) - (a2 - a1)
1777 arglist.append((rev, a1, a2, b1, b2))
1777 arglist.append((rev, a1, a2, b1, b2))
1778
1778
1779 def d():
1779 def d():
1780 ll = linelog.linelog()
1780 ll = linelog.linelog()
1781 for args in arglist:
1781 for args in arglist:
1782 ll.replacelines(*args)
1782 ll.replacelines(*args)
1783
1783
1784 timer, fm = gettimer(ui, opts)
1784 timer, fm = gettimer(ui, opts)
1785 timer(d)
1785 timer(d)
1786 fm.end()
1786 fm.end()
1787
1787
1788
1788
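The `currentlines += (b2 - b1) - (a2 - a1)` bookkeeping above reflects how replacing lines a1..a2 with b1..b2 changes the total line count. A short check of that arithmetic on a plain Python list (values are illustrative):

lines = ['line-%d' % i for i in range(10)]  # a 10-line "file"
a1, a2 = 2, 5  # replace three existing lines...
b1, b2 = 0, 4  # ...with four new ones
lines[a1:a2] = ['new-%d' % n for n in range(b1, b2)]
assert len(lines) == 10 + (b2 - b1) - (a2 - a1)
print(len(lines))  # 11
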
1789 @command(b'perf--revrange', formatteropts)
1789 @command(b'perf::revrange|perfrevrange', formatteropts)
1790 def perfrevrange(ui, repo, *specs, **opts):
1790 def perfrevrange(ui, repo, *specs, **opts):
1791 opts = _byteskwargs(opts)
1791 opts = _byteskwargs(opts)
1792 timer, fm = gettimer(ui, opts)
1792 timer, fm = gettimer(ui, opts)
1793 revrange = scmutil.revrange
1793 revrange = scmutil.revrange
1794 timer(lambda: len(revrange(repo, specs)))
1794 timer(lambda: len(revrange(repo, specs)))
1795 fm.end()
1795 fm.end()
1796
1796
1797
1797
1798 @command(b'perf--nodelookup', formatteropts)
1798 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
1799 def perfnodelookup(ui, repo, rev, **opts):
1799 def perfnodelookup(ui, repo, rev, **opts):
1800 opts = _byteskwargs(opts)
1800 opts = _byteskwargs(opts)
1801 timer, fm = gettimer(ui, opts)
1801 timer, fm = gettimer(ui, opts)
1802 import mercurial.revlog
1802 import mercurial.revlog
1803
1803
1804 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1804 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1805 n = scmutil.revsingle(repo, rev).node()
1805 n = scmutil.revsingle(repo, rev).node()
1806 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1806 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1807
1807
1808 def d():
1808 def d():
1809 cl.rev(n)
1809 cl.rev(n)
1810 clearcaches(cl)
1810 clearcaches(cl)
1811
1811
1812 timer(d)
1812 timer(d)
1813 fm.end()
1813 fm.end()
1814
1814
1815
1815
1816 @command(
1816 @command(
1817 b'perf--log',
1817 b'perf::log|perflog',
1818 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1818 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1819 )
1819 )
1820 def perflog(ui, repo, rev=None, **opts):
1820 def perflog(ui, repo, rev=None, **opts):
1821 opts = _byteskwargs(opts)
1821 opts = _byteskwargs(opts)
1822 if rev is None:
1822 if rev is None:
1823 rev = []
1823 rev = []
1824 timer, fm = gettimer(ui, opts)
1824 timer, fm = gettimer(ui, opts)
1825 ui.pushbuffer()
1825 ui.pushbuffer()
1826 timer(
1826 timer(
1827 lambda: commands.log(
1827 lambda: commands.log(
1828 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1828 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1829 )
1829 )
1830 )
1830 )
1831 ui.popbuffer()
1831 ui.popbuffer()
1832 fm.end()
1832 fm.end()
1833
1833
1834
1834
1835 @command(b'perf--moonwalk', formatteropts)
1835 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
1836 def perfmoonwalk(ui, repo, **opts):
1836 def perfmoonwalk(ui, repo, **opts):
1837 """benchmark walking the changelog backwards
1837 """benchmark walking the changelog backwards
1838
1838
1839 This also loads the changelog data for each revision in the changelog.
1839 This also loads the changelog data for each revision in the changelog.
1840 """
1840 """
1841 opts = _byteskwargs(opts)
1841 opts = _byteskwargs(opts)
1842 timer, fm = gettimer(ui, opts)
1842 timer, fm = gettimer(ui, opts)
1843
1843
1844 def moonwalk():
1844 def moonwalk():
1845 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1845 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1846 ctx = repo[i]
1846 ctx = repo[i]
1847 ctx.branch() # read changelog data (in addition to the index)
1847 ctx.branch() # read changelog data (in addition to the index)
1848
1848
1849 timer(moonwalk)
1849 timer(moonwalk)
1850 fm.end()
1850 fm.end()
1851
1851
1852
1852
1853 @command(
1853 @command(
1854 b'perf--templating',
1854 b'perf::templating|perftemplating',
1855 [
1855 [
1856 (b'r', b'rev', [], b'revisions to run the template on'),
1856 (b'r', b'rev', [], b'revisions to run the template on'),
1857 ]
1857 ]
1858 + formatteropts,
1858 + formatteropts,
1859 )
1859 )
1860 def perftemplating(ui, repo, testedtemplate=None, **opts):
1860 def perftemplating(ui, repo, testedtemplate=None, **opts):
1861 """test the rendering time of a given template"""
1861 """test the rendering time of a given template"""
1862 if makelogtemplater is None:
1862 if makelogtemplater is None:
1863 raise error.Abort(
1863 raise error.Abort(
1864 b"perftemplating not available with this Mercurial",
1864 b"perftemplating not available with this Mercurial",
1865 hint=b"use 4.3 or later",
1865 hint=b"use 4.3 or later",
1866 )
1866 )
1867
1867
1868 opts = _byteskwargs(opts)
1868 opts = _byteskwargs(opts)
1869
1869
1870 nullui = ui.copy()
1870 nullui = ui.copy()
1871 nullui.fout = open(os.devnull, 'wb')
1871 nullui.fout = open(os.devnull, 'wb')
1872 nullui.disablepager()
1872 nullui.disablepager()
1873 revs = opts.get(b'rev')
1873 revs = opts.get(b'rev')
1874 if not revs:
1874 if not revs:
1875 revs = [b'all()']
1875 revs = [b'all()']
1876 revs = list(scmutil.revrange(repo, revs))
1876 revs = list(scmutil.revrange(repo, revs))
1877
1877
1878 defaulttemplate = (
1878 defaulttemplate = (
1879 b'{date|shortdate} [{rev}:{node|short}]'
1879 b'{date|shortdate} [{rev}:{node|short}]'
1880 b' {author|person}: {desc|firstline}\n'
1880 b' {author|person}: {desc|firstline}\n'
1881 )
1881 )
1882 if testedtemplate is None:
1882 if testedtemplate is None:
1883 testedtemplate = defaulttemplate
1883 testedtemplate = defaulttemplate
1884 displayer = makelogtemplater(nullui, repo, testedtemplate)
1884 displayer = makelogtemplater(nullui, repo, testedtemplate)
1885
1885
1886 def format():
1886 def format():
1887 for r in revs:
1887 for r in revs:
1888 ctx = repo[r]
1888 ctx = repo[r]
1889 displayer.show(ctx)
1889 displayer.show(ctx)
1890 displayer.flush(ctx)
1890 displayer.flush(ctx)
1891
1891
1892 timer, fm = gettimer(ui, opts)
1892 timer, fm = gettimer(ui, opts)
1893 timer(format)
1893 timer(format)
1894 fm.end()
1894 fm.end()
1895
1895
1896
1896
1897 def _displaystats(ui, opts, entries, data):
1897 def _displaystats(ui, opts, entries, data):
1898 # use a second formatter because the data are quite different, not sure
1898 # use a second formatter because the data are quite different, not sure
1899 # how it flies with the templater.
1899 # how it flies with the templater.
1900 fm = ui.formatter(b'perf-stats', opts)
1900 fm = ui.formatter(b'perf-stats', opts)
1901 for key, title in entries:
1901 for key, title in entries:
1902 values = data[key]
1902 values = data[key]
1903 nbvalues = len(data)
1903 nbvalues = len(data)
1904 values.sort()
1904 values.sort()
1905 stats = {
1905 stats = {
1906 'key': key,
1906 'key': key,
1907 'title': title,
1907 'title': title,
1908 'nbitems': len(values),
1908 'nbitems': len(values),
1909 'min': values[0][0],
1909 'min': values[0][0],
1910 '10%': values[(nbvalues * 10) // 100][0],
1910 '10%': values[(nbvalues * 10) // 100][0],
1911 '25%': values[(nbvalues * 25) // 100][0],
1911 '25%': values[(nbvalues * 25) // 100][0],
1912 '50%': values[(nbvalues * 50) // 100][0],
1912 '50%': values[(nbvalues * 50) // 100][0],
1913 '75%': values[(nbvalues * 75) // 100][0],
1913 '75%': values[(nbvalues * 75) // 100][0],
1914 '80%': values[(nbvalues * 80) // 100][0],
1914 '80%': values[(nbvalues * 80) // 100][0],
1915 '85%': values[(nbvalues * 85) // 100][0],
1915 '85%': values[(nbvalues * 85) // 100][0],
1916 '90%': values[(nbvalues * 90) // 100][0],
1916 '90%': values[(nbvalues * 90) // 100][0],
1917 '95%': values[(nbvalues * 95) // 100][0],
1917 '95%': values[(nbvalues * 95) // 100][0],
1918 '99%': values[(nbvalues * 99) // 100][0],
1918 '99%': values[(nbvalues * 99) // 100][0],
1919 'max': values[-1][0],
1919 'max': values[-1][0],
1920 }
1920 }
1921 fm.startitem()
1921 fm.startitem()
1922 fm.data(**stats)
1922 fm.data(**stats)
1923 # make node pretty for the human output
1923 # make node pretty for the human output
1924 fm.plain('### %s (%d items)\n' % (title, len(values)))
1924 fm.plain('### %s (%d items)\n' % (title, len(values)))
1925 lines = [
1925 lines = [
1926 'min',
1926 'min',
1927 '10%',
1927 '10%',
1928 '25%',
1928 '25%',
1929 '50%',
1929 '50%',
1930 '75%',
1930 '75%',
1931 '80%',
1931 '80%',
1932 '85%',
1932 '85%',
1933 '90%',
1933 '90%',
1934 '95%',
1934 '95%',
1935 '99%',
1935 '99%',
1936 'max',
1936 'max',
1937 ]
1937 ]
1938 for l in lines:
1938 for l in lines:
1939 fm.plain('%s: %s\n' % (l, stats[l]))
1939 fm.plain('%s: %s\n' % (l, stats[l]))
1940 fm.end()
1940 fm.end()
1941
1941
1942
1942
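The percentile lines above read values off an already sorted list by integer index. A small sketch of that index arithmetic, assuming the divisor is the number of collected samples:

def percentile(sorted_values, pct):
    # Same style of integer index arithmetic as the stats dict above,
    # assuming the divisor is the number of collected samples.
    return sorted_values[(len(sorted_values) * pct) // 100]

samples = sorted([5, 1, 9, 3, 7, 2, 8, 4, 6, 0])
for pct in (10, 50, 90):
    print('%d%%: %d' % (pct, percentile(samples, pct)))
print('max: %d' % samples[-1])
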
1943 @command(
1943 @command(
1944 b'perf--helper-mergecopies',
1944 b'perf::helper-mergecopies|perfhelper-mergecopies',
1945 formatteropts
1945 formatteropts
1946 + [
1946 + [
1947 (b'r', b'revs', [], b'restrict search to these revisions'),
1947 (b'r', b'revs', [], b'restrict search to these revisions'),
1948 (b'', b'timing', False, b'provides extra data (costly)'),
1948 (b'', b'timing', False, b'provides extra data (costly)'),
1949 (b'', b'stats', False, b'provides statistic about the measured data'),
1949 (b'', b'stats', False, b'provides statistic about the measured data'),
1950 ],
1950 ],
1951 )
1951 )
1952 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1952 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1953 """find statistics about potential parameters for `perfmergecopies`
1953 """find statistics about potential parameters for `perfmergecopies`
1954
1954
1955 This command finds (base, p1, p2) triplets relevant for copytracing
1955 This command finds (base, p1, p2) triplets relevant for copytracing
1956 benchmarking in the context of a merge. It reports values for some of the
1956 benchmarking in the context of a merge. It reports values for some of the
1957 parameters that impact merge copy tracing time during merge.
1957 parameters that impact merge copy tracing time during merge.
1958
1958
1959 If `--timing` is set, rename detection is run and the associated timing
1959 If `--timing` is set, rename detection is run and the associated timing
1960 will be reported. The extra details come at the cost of slower command
1960 will be reported. The extra details come at the cost of slower command
1961 execution.
1961 execution.
1962
1962
1963 Since rename detection is only run once, other factors might easily
1963 Since rename detection is only run once, other factors might easily
1964 affect the precision of the timing. However it should give a good
1964 affect the precision of the timing. However it should give a good
1965 approximation of which revision triplets are very costly.
1965 approximation of which revision triplets are very costly.
1966 """
1966 """
1967 opts = _byteskwargs(opts)
1967 opts = _byteskwargs(opts)
1968 fm = ui.formatter(b'perf', opts)
1968 fm = ui.formatter(b'perf', opts)
1969 dotiming = opts[b'timing']
1969 dotiming = opts[b'timing']
1970 dostats = opts[b'stats']
1970 dostats = opts[b'stats']
1971
1971
1972 output_template = [
1972 output_template = [
1973 ("base", "%(base)12s"),
1973 ("base", "%(base)12s"),
1974 ("p1", "%(p1.node)12s"),
1974 ("p1", "%(p1.node)12s"),
1975 ("p2", "%(p2.node)12s"),
1975 ("p2", "%(p2.node)12s"),
1976 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1976 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1977 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1977 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1978 ("p1.renames", "%(p1.renamedfiles)12d"),
1978 ("p1.renames", "%(p1.renamedfiles)12d"),
1979 ("p1.time", "%(p1.time)12.3f"),
1979 ("p1.time", "%(p1.time)12.3f"),
1980 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1980 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1981 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1981 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1982 ("p2.renames", "%(p2.renamedfiles)12d"),
1982 ("p2.renames", "%(p2.renamedfiles)12d"),
1983 ("p2.time", "%(p2.time)12.3f"),
1983 ("p2.time", "%(p2.time)12.3f"),
1984 ("renames", "%(nbrenamedfiles)12d"),
1984 ("renames", "%(nbrenamedfiles)12d"),
1985 ("total.time", "%(time)12.3f"),
1985 ("total.time", "%(time)12.3f"),
1986 ]
1986 ]
1987 if not dotiming:
1987 if not dotiming:
1988 output_template = [
1988 output_template = [
1989 i
1989 i
1990 for i in output_template
1990 for i in output_template
1991 if not ('time' in i[0] or 'renames' in i[0])
1991 if not ('time' in i[0] or 'renames' in i[0])
1992 ]
1992 ]
1993 header_names = [h for (h, v) in output_template]
1993 header_names = [h for (h, v) in output_template]
1994 output = ' '.join([v for (h, v) in output_template]) + '\n'
1994 output = ' '.join([v for (h, v) in output_template]) + '\n'
1995 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1995 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1996 fm.plain(header % tuple(header_names))
1996 fm.plain(header % tuple(header_names))
1997
1997
1998 if not revs:
1998 if not revs:
1999 revs = ['all()']
1999 revs = ['all()']
2000 revs = scmutil.revrange(repo, revs)
2000 revs = scmutil.revrange(repo, revs)
2001
2001
2002 if dostats:
2002 if dostats:
2003 alldata = {
2003 alldata = {
2004 'nbrevs': [],
2004 'nbrevs': [],
2005 'nbmissingfiles': [],
2005 'nbmissingfiles': [],
2006 }
2006 }
2007 if dotiming:
2007 if dotiming:
2008 alldata['parentnbrenames'] = []
2008 alldata['parentnbrenames'] = []
2009 alldata['totalnbrenames'] = []
2009 alldata['totalnbrenames'] = []
2010 alldata['parenttime'] = []
2010 alldata['parenttime'] = []
2011 alldata['totaltime'] = []
2011 alldata['totaltime'] = []
2012
2012
2013 roi = repo.revs('merge() and %ld', revs)
2013 roi = repo.revs('merge() and %ld', revs)
2014 for r in roi:
2014 for r in roi:
2015 ctx = repo[r]
2015 ctx = repo[r]
2016 p1 = ctx.p1()
2016 p1 = ctx.p1()
2017 p2 = ctx.p2()
2017 p2 = ctx.p2()
2018 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2018 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2019 for b in bases:
2019 for b in bases:
2020 b = repo[b]
2020 b = repo[b]
2021 p1missing = copies._computeforwardmissing(b, p1)
2021 p1missing = copies._computeforwardmissing(b, p1)
2022 p2missing = copies._computeforwardmissing(b, p2)
2022 p2missing = copies._computeforwardmissing(b, p2)
2023 data = {
2023 data = {
2024 b'base': b.hex(),
2024 b'base': b.hex(),
2025 b'p1.node': p1.hex(),
2025 b'p1.node': p1.hex(),
2026 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2026 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2027 b'p1.nbmissingfiles': len(p1missing),
2027 b'p1.nbmissingfiles': len(p1missing),
2028 b'p2.node': p2.hex(),
2028 b'p2.node': p2.hex(),
2029 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2029 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2030 b'p2.nbmissingfiles': len(p2missing),
2030 b'p2.nbmissingfiles': len(p2missing),
2031 }
2031 }
2032 if dostats:
2032 if dostats:
2033 if p1missing:
2033 if p1missing:
2034 alldata['nbrevs'].append(
2034 alldata['nbrevs'].append(
2035 (data['p1.nbrevs'], b.hex(), p1.hex())
2035 (data['p1.nbrevs'], b.hex(), p1.hex())
2036 )
2036 )
2037 alldata['nbmissingfiles'].append(
2037 alldata['nbmissingfiles'].append(
2038 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2038 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2039 )
2039 )
2040 if p2missing:
2040 if p2missing:
2041 alldata['nbrevs'].append(
2041 alldata['nbrevs'].append(
2042 (data['p2.nbrevs'], b.hex(), p2.hex())
2042 (data['p2.nbrevs'], b.hex(), p2.hex())
2043 )
2043 )
2044 alldata['nbmissingfiles'].append(
2044 alldata['nbmissingfiles'].append(
2045 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2045 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2046 )
2046 )
2047 if dotiming:
2047 if dotiming:
2048 begin = util.timer()
2048 begin = util.timer()
2049 mergedata = copies.mergecopies(repo, p1, p2, b)
2049 mergedata = copies.mergecopies(repo, p1, p2, b)
2050 end = util.timer()
2050 end = util.timer()
2051 # not very stable timing since we did only one run
2051 # not very stable timing since we did only one run
2052 data['time'] = end - begin
2052 data['time'] = end - begin
2053 # mergedata contains five dicts: "copy", "movewithdir",
2053 # mergedata contains five dicts: "copy", "movewithdir",
2054 # "diverge", "renamedelete" and "dirmove".
2054 # "diverge", "renamedelete" and "dirmove".
2055 # The first 4 are about renamed files, so let's count them.
2055 # The first 4 are about renamed files, so let's count them.
2056 renames = len(mergedata[0])
2056 renames = len(mergedata[0])
2057 renames += len(mergedata[1])
2057 renames += len(mergedata[1])
2058 renames += len(mergedata[2])
2058 renames += len(mergedata[2])
2059 renames += len(mergedata[3])
2059 renames += len(mergedata[3])
2060 data['nbrenamedfiles'] = renames
2060 data['nbrenamedfiles'] = renames
2061 begin = util.timer()
2061 begin = util.timer()
2062 p1renames = copies.pathcopies(b, p1)
2062 p1renames = copies.pathcopies(b, p1)
2063 end = util.timer()
2063 end = util.timer()
2064 data['p1.time'] = end - begin
2064 data['p1.time'] = end - begin
2065 begin = util.timer()
2065 begin = util.timer()
2066 p2renames = copies.pathcopies(b, p2)
2066 p2renames = copies.pathcopies(b, p2)
2067 end = util.timer()
2067 end = util.timer()
2068 data['p2.time'] = end - begin
2068 data['p2.time'] = end - begin
2069 data['p1.renamedfiles'] = len(p1renames)
2069 data['p1.renamedfiles'] = len(p1renames)
2070 data['p2.renamedfiles'] = len(p2renames)
2070 data['p2.renamedfiles'] = len(p2renames)
2071
2071
2072 if dostats:
2072 if dostats:
2073 if p1missing:
2073 if p1missing:
2074 alldata['parentnbrenames'].append(
2074 alldata['parentnbrenames'].append(
2075 (data['p1.renamedfiles'], b.hex(), p1.hex())
2075 (data['p1.renamedfiles'], b.hex(), p1.hex())
2076 )
2076 )
2077 alldata['parenttime'].append(
2077 alldata['parenttime'].append(
2078 (data['p1.time'], b.hex(), p1.hex())
2078 (data['p1.time'], b.hex(), p1.hex())
2079 )
2079 )
2080 if p2missing:
2080 if p2missing:
2081 alldata['parentnbrenames'].append(
2081 alldata['parentnbrenames'].append(
2082 (data['p2.renamedfiles'], b.hex(), p2.hex())
2082 (data['p2.renamedfiles'], b.hex(), p2.hex())
2083 )
2083 )
2084 alldata['parenttime'].append(
2084 alldata['parenttime'].append(
2085 (data['p2.time'], b.hex(), p2.hex())
2085 (data['p2.time'], b.hex(), p2.hex())
2086 )
2086 )
2087 if p1missing or p2missing:
2087 if p1missing or p2missing:
2088 alldata['totalnbrenames'].append(
2088 alldata['totalnbrenames'].append(
2089 (
2089 (
2090 data['nbrenamedfiles'],
2090 data['nbrenamedfiles'],
2091 b.hex(),
2091 b.hex(),
2092 p1.hex(),
2092 p1.hex(),
2093 p2.hex(),
2093 p2.hex(),
2094 )
2094 )
2095 )
2095 )
2096 alldata['totaltime'].append(
2096 alldata['totaltime'].append(
2097 (data['time'], b.hex(), p1.hex(), p2.hex())
2097 (data['time'], b.hex(), p1.hex(), p2.hex())
2098 )
2098 )
2099 fm.startitem()
2099 fm.startitem()
2100 fm.data(**data)
2100 fm.data(**data)
2101 # make node pretty for the human output
2101 # make node pretty for the human output
2102 out = data.copy()
2102 out = data.copy()
2103 out['base'] = fm.hexfunc(b.node())
2103 out['base'] = fm.hexfunc(b.node())
2104 out['p1.node'] = fm.hexfunc(p1.node())
2104 out['p1.node'] = fm.hexfunc(p1.node())
2105 out['p2.node'] = fm.hexfunc(p2.node())
2105 out['p2.node'] = fm.hexfunc(p2.node())
2106 fm.plain(output % out)
2106 fm.plain(output % out)
2107
2107
2108 fm.end()
2108 fm.end()
2109 if dostats:
2109 if dostats:
2110 # use a second formatter because the data are quite different, not sure
2110 # use a second formatter because the data are quite different, not sure
2111 # how it flies with the templater.
2111 # how it flies with the templater.
2112 entries = [
2112 entries = [
2113 ('nbrevs', 'number of revisions covered'),
2113 ('nbrevs', 'number of revisions covered'),
2114 ('nbmissingfiles', 'number of missing files at head'),
2114 ('nbmissingfiles', 'number of missing files at head'),
2115 ]
2115 ]
2116 if dotiming:
2116 if dotiming:
2117 entries.append(
2117 entries.append(
2118 ('parentnbrenames', 'rename from one parent to base')
2118 ('parentnbrenames', 'rename from one parent to base')
2119 )
2119 )
2120 entries.append(('totalnbrenames', 'total number of renames'))
2120 entries.append(('totalnbrenames', 'total number of renames'))
2121 entries.append(('parenttime', 'time for one parent'))
2121 entries.append(('parenttime', 'time for one parent'))
2122 entries.append(('totaltime', 'time for both parents'))
2122 entries.append(('totaltime', 'time for both parents'))
2123 _displaystats(ui, opts, entries, alldata)
2123 _displaystats(ui, opts, entries, alldata)
2124
2124
2125
2125
2126 @command(
2126 @command(
2127 b'perf--helper-pathcopies',
2127 b'perf::helper-pathcopies|perfhelper-pathcopies',
2128 formatteropts
2128 formatteropts
2129 + [
2129 + [
2130 (b'r', b'revs', [], b'restrict search to these revisions'),
2130 (b'r', b'revs', [], b'restrict search to these revisions'),
2131 (b'', b'timing', False, b'provides extra data (costly)'),
2131 (b'', b'timing', False, b'provides extra data (costly)'),
2132 (b'', b'stats', False, b'provides statistic about the measured data'),
2132 (b'', b'stats', False, b'provides statistic about the measured data'),
2133 ],
2133 ],
2134 )
2134 )
2135 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2135 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2136 """find statistics about potential parameters for the `perftracecopies`
2136 """find statistics about potential parameters for the `perftracecopies`
2137
2137
2138 This command finds source-destination pairs relevant for copytracing testing.
2138 This command finds source-destination pairs relevant for copytracing testing.
2139 It reports values for some of the parameters that impact copy tracing time.
2139 It reports values for some of the parameters that impact copy tracing time.
2140
2140
2141 If `--timing` is set, rename detection is run and the associated timing
2141 If `--timing` is set, rename detection is run and the associated timing
2142 will be reported. The extra details come at the cost of slower command
2142 will be reported. The extra details come at the cost of slower command
2143 execution.
2143 execution.
2144
2144
2145 Since the rename detection is only run once, other factors might easily
2145 Since the rename detection is only run once, other factors might easily
2146 affect the precision of the timing. However, it should give a good
2146 affect the precision of the timing. However, it should give a good
2147 approximation of which revision pairs are very costly.
2147 approximation of which revision pairs are very costly.
2148 """
2148 """
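    # Hedged usage sketch (editorial addition, not part of the changeset): with
    # this extension enabled, the command is reachable under both the new
    # namespaced name and the legacy alias registered above, for example:
    #   $ hg perf::helper-pathcopies --stats
    #   $ hg perfhelper-pathcopies -r 'merge()' --timing --stats
    # The revset and flag combinations are illustrative only.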
2149 opts = _byteskwargs(opts)
2149 opts = _byteskwargs(opts)
2150 fm = ui.formatter(b'perf', opts)
2150 fm = ui.formatter(b'perf', opts)
2151 dotiming = opts[b'timing']
2151 dotiming = opts[b'timing']
2152 dostats = opts[b'stats']
2152 dostats = opts[b'stats']
2153
2153
2154 if dotiming:
2154 if dotiming:
2155 header = '%12s %12s %12s %12s %12s %12s\n'
2155 header = '%12s %12s %12s %12s %12s %12s\n'
2156 output = (
2156 output = (
2157 "%(source)12s %(destination)12s "
2157 "%(source)12s %(destination)12s "
2158 "%(nbrevs)12d %(nbmissingfiles)12d "
2158 "%(nbrevs)12d %(nbmissingfiles)12d "
2159 "%(nbrenamedfiles)12d %(time)18.5f\n"
2159 "%(nbrenamedfiles)12d %(time)18.5f\n"
2160 )
2160 )
2161 header_names = (
2161 header_names = (
2162 "source",
2162 "source",
2163 "destination",
2163 "destination",
2164 "nb-revs",
2164 "nb-revs",
2165 "nb-files",
2165 "nb-files",
2166 "nb-renames",
2166 "nb-renames",
2167 "time",
2167 "time",
2168 )
2168 )
2169 fm.plain(header % header_names)
2169 fm.plain(header % header_names)
2170 else:
2170 else:
2171 header = '%12s %12s %12s %12s\n'
2171 header = '%12s %12s %12s %12s\n'
2172 output = (
2172 output = (
2173 "%(source)12s %(destination)12s "
2173 "%(source)12s %(destination)12s "
2174 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2174 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2175 )
2175 )
2176 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2176 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2177
2177
2178 if not revs:
2178 if not revs:
2179 revs = ['all()']
2179 revs = ['all()']
2180 revs = scmutil.revrange(repo, revs)
2180 revs = scmutil.revrange(repo, revs)
2181
2181
2182 if dostats:
2182 if dostats:
2183 alldata = {
2183 alldata = {
2184 'nbrevs': [],
2184 'nbrevs': [],
2185 'nbmissingfiles': [],
2185 'nbmissingfiles': [],
2186 }
2186 }
2187 if dotiming:
2187 if dotiming:
2188 alldata['nbrenames'] = []
2188 alldata['nbrenames'] = []
2189 alldata['time'] = []
2189 alldata['time'] = []
2190
2190
2191 roi = repo.revs('merge() and %ld', revs)
2191 roi = repo.revs('merge() and %ld', revs)
2192 for r in roi:
2192 for r in roi:
2193 ctx = repo[r]
2193 ctx = repo[r]
2194 p1 = ctx.p1().rev()
2194 p1 = ctx.p1().rev()
2195 p2 = ctx.p2().rev()
2195 p2 = ctx.p2().rev()
2196 bases = repo.changelog._commonancestorsheads(p1, p2)
2196 bases = repo.changelog._commonancestorsheads(p1, p2)
2197 for p in (p1, p2):
2197 for p in (p1, p2):
2198 for b in bases:
2198 for b in bases:
2199 base = repo[b]
2199 base = repo[b]
2200 parent = repo[p]
2200 parent = repo[p]
2201 missing = copies._computeforwardmissing(base, parent)
2201 missing = copies._computeforwardmissing(base, parent)
2202 if not missing:
2202 if not missing:
2203 continue
2203 continue
2204 data = {
2204 data = {
2205 b'source': base.hex(),
2205 b'source': base.hex(),
2206 b'destination': parent.hex(),
2206 b'destination': parent.hex(),
2207 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2207 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2208 b'nbmissingfiles': len(missing),
2208 b'nbmissingfiles': len(missing),
2209 }
2209 }
2210 if dostats:
2210 if dostats:
2211 alldata['nbrevs'].append(
2211 alldata['nbrevs'].append(
2212 (
2212 (
2213 data['nbrevs'],
2213 data['nbrevs'],
2214 base.hex(),
2214 base.hex(),
2215 parent.hex(),
2215 parent.hex(),
2216 )
2216 )
2217 )
2217 )
2218 alldata['nbmissingfiles'].append(
2218 alldata['nbmissingfiles'].append(
2219 (
2219 (
2220 data['nbmissingfiles'],
2220 data['nbmissingfiles'],
2221 base.hex(),
2221 base.hex(),
2222 parent.hex(),
2222 parent.hex(),
2223 )
2223 )
2224 )
2224 )
2225 if dotiming:
2225 if dotiming:
2226 begin = util.timer()
2226 begin = util.timer()
2227 renames = copies.pathcopies(base, parent)
2227 renames = copies.pathcopies(base, parent)
2228 end = util.timer()
2228 end = util.timer()
2229 # not very stable timing since we did only one run
2229 # not very stable timing since we did only one run
2230 data['time'] = end - begin
2230 data['time'] = end - begin
2231 data['nbrenamedfiles'] = len(renames)
2231 data['nbrenamedfiles'] = len(renames)
2232 if dostats:
2232 if dostats:
2233 alldata['time'].append(
2233 alldata['time'].append(
2234 (
2234 (
2235 data['time'],
2235 data['time'],
2236 base.hex(),
2236 base.hex(),
2237 parent.hex(),
2237 parent.hex(),
2238 )
2238 )
2239 )
2239 )
2240 alldata['nbrenames'].append(
2240 alldata['nbrenames'].append(
2241 (
2241 (
2242 data['nbrenamedfiles'],
2242 data['nbrenamedfiles'],
2243 base.hex(),
2243 base.hex(),
2244 parent.hex(),
2244 parent.hex(),
2245 )
2245 )
2246 )
2246 )
2247 fm.startitem()
2247 fm.startitem()
2248 fm.data(**data)
2248 fm.data(**data)
2249 out = data.copy()
2249 out = data.copy()
2250 out['source'] = fm.hexfunc(base.node())
2250 out['source'] = fm.hexfunc(base.node())
2251 out['destination'] = fm.hexfunc(parent.node())
2251 out['destination'] = fm.hexfunc(parent.node())
2252 fm.plain(output % out)
2252 fm.plain(output % out)
2253
2253
2254 fm.end()
2254 fm.end()
2255 if dostats:
2255 if dostats:
2256 entries = [
2256 entries = [
2257 ('nbrevs', 'number of revisions covered'),
2257 ('nbrevs', 'number of revisions covered'),
2258 ('nbmissingfiles', 'number of missing files at head'),
2258 ('nbmissingfiles', 'number of missing files at head'),
2259 ]
2259 ]
2260 if dotiming:
2260 if dotiming:
2261 entries.append(('nbrenames', 'renamed files'))
2261 entries.append(('nbrenames', 'renamed files'))
2262 entries.append(('time', 'time'))
2262 entries.append(('time', 'time'))
2263 _displaystats(ui, opts, entries, alldata)
2263 _displaystats(ui, opts, entries, alldata)
2264
2264
2265
2265
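# Editorial note on the registrations below: each command is declared under the
# `perf::` namespace with the historical flat name kept as an alias, so
# `b'perf::cca|perfcca'` exposes both `hg perf::cca` and `hg perfcca`.
# A minimal, hedged sketch of the pattern (hypothetical command name, reusing
# the helpers already defined in this extension):
#
#     @command(b'perf::example|perfexample', formatteropts)
#     def perfexample(ui, repo, **opts):
#         """benchmark a no-op call (illustrative only)"""
#         opts = _byteskwargs(opts)
#         timer, fm = gettimer(ui, opts)
#         timer(lambda: None)
#         fm.end()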
2266 @command(b'perf--cca', formatteropts)
2266 @command(b'perf::cca|perfcca', formatteropts)
2267 def perfcca(ui, repo, **opts):
2267 def perfcca(ui, repo, **opts):
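    # Descriptive note (editorial addition): "cca" refers to the
    # casecollisionauditor benchmarked below; this times constructing the
    # auditor over the repository dirstate.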
2268 opts = _byteskwargs(opts)
2268 opts = _byteskwargs(opts)
2269 timer, fm = gettimer(ui, opts)
2269 timer, fm = gettimer(ui, opts)
2270 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2270 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2271 fm.end()
2271 fm.end()
2272
2272
2273
2273
2274 @command(b'perf--fncacheload', formatteropts)
2274 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2275 def perffncacheload(ui, repo, **opts):
2275 def perffncacheload(ui, repo, **opts):
2276 opts = _byteskwargs(opts)
2276 opts = _byteskwargs(opts)
2277 timer, fm = gettimer(ui, opts)
2277 timer, fm = gettimer(ui, opts)
2278 s = repo.store
2278 s = repo.store
2279
2279
2280 def d():
2280 def d():
2281 s.fncache._load()
2281 s.fncache._load()
2282
2282
2283 timer(d)
2283 timer(d)
2284 fm.end()
2284 fm.end()
2285
2285
2286
2286
2287 @command(b'perf--fncachewrite', formatteropts)
2287 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2288 def perffncachewrite(ui, repo, **opts):
2288 def perffncachewrite(ui, repo, **opts):
2289 opts = _byteskwargs(opts)
2289 opts = _byteskwargs(opts)
2290 timer, fm = gettimer(ui, opts)
2290 timer, fm = gettimer(ui, opts)
2291 s = repo.store
2291 s = repo.store
2292 lock = repo.lock()
2292 lock = repo.lock()
2293 s.fncache._load()
2293 s.fncache._load()
2294 tr = repo.transaction(b'perffncachewrite')
2294 tr = repo.transaction(b'perffncachewrite')
2295 tr.addbackup(b'fncache')
2295 tr.addbackup(b'fncache')
2296
2296
2297 def d():
2297 def d():
2298 s.fncache._dirty = True
2298 s.fncache._dirty = True
2299 s.fncache.write(tr)
2299 s.fncache.write(tr)
2300
2300
2301 timer(d)
2301 timer(d)
2302 tr.close()
2302 tr.close()
2303 lock.release()
2303 lock.release()
2304 fm.end()
2304 fm.end()
2305
2305
2306
2306
2307 @command(b'perf--fncacheencode', formatteropts)
2307 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2308 def perffncacheencode(ui, repo, **opts):
2308 def perffncacheencode(ui, repo, **opts):
2309 opts = _byteskwargs(opts)
2309 opts = _byteskwargs(opts)
2310 timer, fm = gettimer(ui, opts)
2310 timer, fm = gettimer(ui, opts)
2311 s = repo.store
2311 s = repo.store
2312 s.fncache._load()
2312 s.fncache._load()
2313
2313
2314 def d():
2314 def d():
2315 for p in s.fncache.entries:
2315 for p in s.fncache.entries:
2316 s.encode(p)
2316 s.encode(p)
2317
2317
2318 timer(d)
2318 timer(d)
2319 fm.end()
2319 fm.end()
2320
2320
2321
2321
2322 def _bdiffworker(q, blocks, xdiff, ready, done):
2322 def _bdiffworker(q, blocks, xdiff, ready, done):
2323 while not done.is_set():
2323 while not done.is_set():
2324 pair = q.get()
2324 pair = q.get()
2325 while pair is not None:
2325 while pair is not None:
2326 if xdiff:
2326 if xdiff:
2327 mdiff.bdiff.xdiffblocks(*pair)
2327 mdiff.bdiff.xdiffblocks(*pair)
2328 elif blocks:
2328 elif blocks:
2329 mdiff.bdiff.blocks(*pair)
2329 mdiff.bdiff.blocks(*pair)
2330 else:
2330 else:
2331 mdiff.textdiff(*pair)
2331 mdiff.textdiff(*pair)
2332 q.task_done()
2332 q.task_done()
2333 pair = q.get()
2333 pair = q.get()
2334 q.task_done() # for the None one
2334 q.task_done() # for the None one
2335 with ready:
2335 with ready:
2336 ready.wait()
2336 ready.wait()
2337
2337
2338
2338
2339 def _manifestrevision(repo, mnode):
2339 def _manifestrevision(repo, mnode):
2340 ml = repo.manifestlog
2340 ml = repo.manifestlog
2341
2341
2342 if util.safehasattr(ml, b'getstorage'):
2342 if util.safehasattr(ml, b'getstorage'):
2343 store = ml.getstorage(b'')
2343 store = ml.getstorage(b'')
2344 else:
2344 else:
2345 store = ml._revlog
2345 store = ml._revlog
2346
2346
2347 return store.revision(mnode)
2347 return store.revision(mnode)
2348
2348
2349
2349
2350 @command(
2350 @command(
2351 b'perf--bdiff',
2351 b'perf::bdiff|perfbdiff',
2352 revlogopts
2352 revlogopts
2353 + formatteropts
2353 + formatteropts
2354 + [
2354 + [
2355 (
2355 (
2356 b'',
2356 b'',
2357 b'count',
2357 b'count',
2358 1,
2358 1,
2359 b'number of revisions to test (when using --startrev)',
2359 b'number of revisions to test (when using --startrev)',
2360 ),
2360 ),
2361 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2361 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2362 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2362 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2363 (b'', b'blocks', False, b'test computing diffs into blocks'),
2363 (b'', b'blocks', False, b'test computing diffs into blocks'),
2364 (b'', b'xdiff', False, b'use xdiff algorithm'),
2364 (b'', b'xdiff', False, b'use xdiff algorithm'),
2365 ],
2365 ],
2366 b'-c|-m|FILE REV',
2366 b'-c|-m|FILE REV',
2367 )
2367 )
2368 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2368 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2369 """benchmark a bdiff between revisions
2369 """benchmark a bdiff between revisions
2370
2370
2371 By default, benchmark a bdiff between the requested revision and its delta parent.
2371 By default, benchmark a bdiff between the requested revision and its delta parent.
2372
2372
2373 With ``--count``, benchmark bdiffs between delta parents and self for N
2373 With ``--count``, benchmark bdiffs between delta parents and self for N
2374 revisions starting at the specified revision.
2374 revisions starting at the specified revision.
2375
2375
2376 With ``--alldata``, assume the requested revision is a changeset and
2376 With ``--alldata``, assume the requested revision is a changeset and
2377 measure bdiffs for all changes related to that changeset (manifest
2377 measure bdiffs for all changes related to that changeset (manifest
2378 and filelogs).
2378 and filelogs).
2379 """
2379 """
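    # Hedged usage sketch (editorial addition): the flags mirror the
    # registration above; revision numbers and paths are illustrative only.
    #   $ hg perf::bdiff -m 1000 --count 10
    #   $ hg perf::bdiff --alldata 1000 --blocks
    #   $ hg perfbdiff path/to/file 1000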
2380 opts = _byteskwargs(opts)
2380 opts = _byteskwargs(opts)
2381
2381
2382 if opts[b'xdiff'] and not opts[b'blocks']:
2382 if opts[b'xdiff'] and not opts[b'blocks']:
2383 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2383 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2384
2384
2385 if opts[b'alldata']:
2385 if opts[b'alldata']:
2386 opts[b'changelog'] = True
2386 opts[b'changelog'] = True
2387
2387
2388 if opts.get(b'changelog') or opts.get(b'manifest'):
2388 if opts.get(b'changelog') or opts.get(b'manifest'):
2389 file_, rev = None, file_
2389 file_, rev = None, file_
2390 elif rev is None:
2390 elif rev is None:
2391 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2391 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2392
2392
2393 blocks = opts[b'blocks']
2393 blocks = opts[b'blocks']
2394 xdiff = opts[b'xdiff']
2394 xdiff = opts[b'xdiff']
2395 textpairs = []
2395 textpairs = []
2396
2396
2397 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2397 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2398
2398
2399 startrev = r.rev(r.lookup(rev))
2399 startrev = r.rev(r.lookup(rev))
2400 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2400 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2401 if opts[b'alldata']:
2401 if opts[b'alldata']:
2402 # Load revisions associated with changeset.
2402 # Load revisions associated with changeset.
2403 ctx = repo[rev]
2403 ctx = repo[rev]
2404 mtext = _manifestrevision(repo, ctx.manifestnode())
2404 mtext = _manifestrevision(repo, ctx.manifestnode())
2405 for pctx in ctx.parents():
2405 for pctx in ctx.parents():
2406 pman = _manifestrevision(repo, pctx.manifestnode())
2406 pman = _manifestrevision(repo, pctx.manifestnode())
2407 textpairs.append((pman, mtext))
2407 textpairs.append((pman, mtext))
2408
2408
2409 # Load filelog revisions by iterating manifest delta.
2409 # Load filelog revisions by iterating manifest delta.
2410 man = ctx.manifest()
2410 man = ctx.manifest()
2411 pman = ctx.p1().manifest()
2411 pman = ctx.p1().manifest()
2412 for filename, change in pman.diff(man).items():
2412 for filename, change in pman.diff(man).items():
2413 fctx = repo.file(filename)
2413 fctx = repo.file(filename)
2414 f1 = fctx.revision(change[0][0] or -1)
2414 f1 = fctx.revision(change[0][0] or -1)
2415 f2 = fctx.revision(change[1][0] or -1)
2415 f2 = fctx.revision(change[1][0] or -1)
2416 textpairs.append((f1, f2))
2416 textpairs.append((f1, f2))
2417 else:
2417 else:
2418 dp = r.deltaparent(rev)
2418 dp = r.deltaparent(rev)
2419 textpairs.append((r.revision(dp), r.revision(rev)))
2419 textpairs.append((r.revision(dp), r.revision(rev)))
2420
2420
2421 withthreads = threads > 0
2421 withthreads = threads > 0
2422 if not withthreads:
2422 if not withthreads:
2423
2423
2424 def d():
2424 def d():
2425 for pair in textpairs:
2425 for pair in textpairs:
2426 if xdiff:
2426 if xdiff:
2427 mdiff.bdiff.xdiffblocks(*pair)
2427 mdiff.bdiff.xdiffblocks(*pair)
2428 elif blocks:
2428 elif blocks:
2429 mdiff.bdiff.blocks(*pair)
2429 mdiff.bdiff.blocks(*pair)
2430 else:
2430 else:
2431 mdiff.textdiff(*pair)
2431 mdiff.textdiff(*pair)
2432
2432
2433 else:
2433 else:
2434 q = queue()
2434 q = queue()
2435 for i in _xrange(threads):
2435 for i in _xrange(threads):
2436 q.put(None)
2436 q.put(None)
2437 ready = threading.Condition()
2437 ready = threading.Condition()
2438 done = threading.Event()
2438 done = threading.Event()
2439 for i in _xrange(threads):
2439 for i in _xrange(threads):
2440 threading.Thread(
2440 threading.Thread(
2441 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2441 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2442 ).start()
2442 ).start()
2443 q.join()
2443 q.join()
2444
2444
2445 def d():
2445 def d():
2446 for pair in textpairs:
2446 for pair in textpairs:
2447 q.put(pair)
2447 q.put(pair)
2448 for i in _xrange(threads):
2448 for i in _xrange(threads):
2449 q.put(None)
2449 q.put(None)
2450 with ready:
2450 with ready:
2451 ready.notify_all()
2451 ready.notify_all()
2452 q.join()
2452 q.join()
2453
2453
2454 timer, fm = gettimer(ui, opts)
2454 timer, fm = gettimer(ui, opts)
2455 timer(d)
2455 timer(d)
2456 fm.end()
2456 fm.end()
2457
2457
2458 if withthreads:
2458 if withthreads:
2459 done.set()
2459 done.set()
2460 for i in _xrange(threads):
2460 for i in _xrange(threads):
2461 q.put(None)
2461 q.put(None)
2462 with ready:
2462 with ready:
2463 ready.notify_all()
2463 ready.notify_all()
2464
2464
2465
2465
2466 @command(
2466 @command(
2467 b'perf--unidiff',
2467 b'perf::unidiff|perfunidiff',
2468 revlogopts
2468 revlogopts
2469 + formatteropts
2469 + formatteropts
2470 + [
2470 + [
2471 (
2471 (
2472 b'',
2472 b'',
2473 b'count',
2473 b'count',
2474 1,
2474 1,
2475 b'number of revisions to test (when using --startrev)',
2475 b'number of revisions to test (when using --startrev)',
2476 ),
2476 ),
2477 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2477 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2478 ],
2478 ],
2479 b'-c|-m|FILE REV',
2479 b'-c|-m|FILE REV',
2480 )
2480 )
2481 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2481 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2482 """benchmark a unified diff between revisions
2482 """benchmark a unified diff between revisions
2483
2483
2484 This doesn't include any copy tracing - it's just a unified diff
2484 This doesn't include any copy tracing - it's just a unified diff
2485 of the texts.
2485 of the texts.
2486
2486
2487 By default, benchmark a diff between the requested revision and its delta parent.
2487 By default, benchmark a diff between the requested revision and its delta parent.
2488
2488
2489 With ``--count``, benchmark diffs between delta parents and self for N
2489 With ``--count``, benchmark diffs between delta parents and self for N
2490 revisions starting at the specified revision.
2490 revisions starting at the specified revision.
2491
2491
2492 With ``--alldata``, assume the requested revision is a changeset and
2492 With ``--alldata``, assume the requested revision is a changeset and
2493 measure diffs for all changes related to that changeset (manifest
2493 measure diffs for all changes related to that changeset (manifest
2494 and filelogs).
2494 and filelogs).
2495 """
2495 """
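    # Hedged usage sketch (editorial addition, values illustrative only):
    #   $ hg perf::unidiff -c 500 --count 20
    #   $ hg perfunidiff --alldata 500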
2496 opts = _byteskwargs(opts)
2496 opts = _byteskwargs(opts)
2497 if opts[b'alldata']:
2497 if opts[b'alldata']:
2498 opts[b'changelog'] = True
2498 opts[b'changelog'] = True
2499
2499
2500 if opts.get(b'changelog') or opts.get(b'manifest'):
2500 if opts.get(b'changelog') or opts.get(b'manifest'):
2501 file_, rev = None, file_
2501 file_, rev = None, file_
2502 elif rev is None:
2502 elif rev is None:
2503 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2503 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2504
2504
2505 textpairs = []
2505 textpairs = []
2506
2506
2507 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2507 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2508
2508
2509 startrev = r.rev(r.lookup(rev))
2509 startrev = r.rev(r.lookup(rev))
2510 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2510 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2511 if opts[b'alldata']:
2511 if opts[b'alldata']:
2512 # Load revisions associated with changeset.
2512 # Load revisions associated with changeset.
2513 ctx = repo[rev]
2513 ctx = repo[rev]
2514 mtext = _manifestrevision(repo, ctx.manifestnode())
2514 mtext = _manifestrevision(repo, ctx.manifestnode())
2515 for pctx in ctx.parents():
2515 for pctx in ctx.parents():
2516 pman = _manifestrevision(repo, pctx.manifestnode())
2516 pman = _manifestrevision(repo, pctx.manifestnode())
2517 textpairs.append((pman, mtext))
2517 textpairs.append((pman, mtext))
2518
2518
2519 # Load filelog revisions by iterating manifest delta.
2519 # Load filelog revisions by iterating manifest delta.
2520 man = ctx.manifest()
2520 man = ctx.manifest()
2521 pman = ctx.p1().manifest()
2521 pman = ctx.p1().manifest()
2522 for filename, change in pman.diff(man).items():
2522 for filename, change in pman.diff(man).items():
2523 fctx = repo.file(filename)
2523 fctx = repo.file(filename)
2524 f1 = fctx.revision(change[0][0] or -1)
2524 f1 = fctx.revision(change[0][0] or -1)
2525 f2 = fctx.revision(change[1][0] or -1)
2525 f2 = fctx.revision(change[1][0] or -1)
2526 textpairs.append((f1, f2))
2526 textpairs.append((f1, f2))
2527 else:
2527 else:
2528 dp = r.deltaparent(rev)
2528 dp = r.deltaparent(rev)
2529 textpairs.append((r.revision(dp), r.revision(rev)))
2529 textpairs.append((r.revision(dp), r.revision(rev)))
2530
2530
2531 def d():
2531 def d():
2532 for left, right in textpairs:
2532 for left, right in textpairs:
2533 # The date strings don't matter, so we pass empty strings.
2533 # The date strings don't matter, so we pass empty strings.
2534 headerlines, hunks = mdiff.unidiff(
2534 headerlines, hunks = mdiff.unidiff(
2535 left, b'', right, b'', b'left', b'right', binary=False
2535 left, b'', right, b'', b'left', b'right', binary=False
2536 )
2536 )
2537 # consume iterators in roughly the way patch.py does
2537 # consume iterators in roughly the way patch.py does
2538 b'\n'.join(headerlines)
2538 b'\n'.join(headerlines)
2539 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2539 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2540
2540
2541 timer, fm = gettimer(ui, opts)
2541 timer, fm = gettimer(ui, opts)
2542 timer(d)
2542 timer(d)
2543 fm.end()
2543 fm.end()
2544
2544
2545
2545
2546 @command(b'perf--diffwd', formatteropts)
2546 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2547 def perfdiffwd(ui, repo, **opts):
2547 def perfdiffwd(ui, repo, **opts):
2548 """Profile diff of working directory changes"""
2548 """Profile diff of working directory changes"""
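    # Hedged note (editorial addition): this times `hg diff` against the
    # working directory once per whitespace-option combination built below
    # (none, -w, -b, -B, -wB). Typical invocation:
    #   $ hg perf::diffwd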
2549 opts = _byteskwargs(opts)
2549 opts = _byteskwargs(opts)
2550 timer, fm = gettimer(ui, opts)
2550 timer, fm = gettimer(ui, opts)
2551 options = {
2551 options = {
2552 'w': 'ignore_all_space',
2552 'w': 'ignore_all_space',
2553 'b': 'ignore_space_change',
2553 'b': 'ignore_space_change',
2554 'B': 'ignore_blank_lines',
2554 'B': 'ignore_blank_lines',
2555 }
2555 }
2556
2556
2557 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2557 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2558 opts = {options[c]: b'1' for c in diffopt}
2558 opts = {options[c]: b'1' for c in diffopt}
2559
2559
2560 def d():
2560 def d():
2561 ui.pushbuffer()
2561 ui.pushbuffer()
2562 commands.diff(ui, repo, **opts)
2562 commands.diff(ui, repo, **opts)
2563 ui.popbuffer()
2563 ui.popbuffer()
2564
2564
2565 diffopt = diffopt.encode('ascii')
2565 diffopt = diffopt.encode('ascii')
2566 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2566 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2567 timer(d, title=title)
2567 timer(d, title=title)
2568 fm.end()
2568 fm.end()
2569
2569
2570
2570
2571 @command(b'perf--revlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2571 @command(
2572 b'perf::revlogindex|perfrevlogindex',
2573 revlogopts + formatteropts,
2574 b'-c|-m|FILE',
2575 )
2572 def perfrevlogindex(ui, repo, file_=None, **opts):
2576 def perfrevlogindex(ui, repo, file_=None, **opts):
2573 """Benchmark operations against a revlog index.
2577 """Benchmark operations against a revlog index.
2574
2578
2575 This tests constructing a revlog instance, reading index data,
2579 This tests constructing a revlog instance, reading index data,
2576 parsing index data, and performing various operations related to
2580 parsing index data, and performing various operations related to
2577 index data.
2581 index data.
2578 """
2582 """
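    # Hedged usage sketch (editorial addition), following the `-c|-m|FILE`
    # synopsis above:
    #   $ hg perf::revlogindex -m
    #   $ hg perfrevlogindex -c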
2579
2583
2580 opts = _byteskwargs(opts)
2584 opts = _byteskwargs(opts)
2581
2585
2582 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2586 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2583
2587
2584 opener = getattr(rl, 'opener') # trick linter
2588 opener = getattr(rl, 'opener') # trick linter
2585 indexfile = rl.indexfile
2589 indexfile = rl.indexfile
2586 data = opener.read(indexfile)
2590 data = opener.read(indexfile)
2587
2591
2588 header = struct.unpack(b'>I', data[0:4])[0]
2592 header = struct.unpack(b'>I', data[0:4])[0]
2589 version = header & 0xFFFF
2593 version = header & 0xFFFF
2590 if version == 1:
2594 if version == 1:
2591 revlogio = revlog.revlogio()
2595 revlogio = revlog.revlogio()
2592 inline = header & (1 << 16)
2596 inline = header & (1 << 16)
2593 else:
2597 else:
2594 raise error.Abort(b'unsupported revlog version: %d' % version)
2598 raise error.Abort(b'unsupported revlog version: %d' % version)
2595
2599
2596 rllen = len(rl)
2600 rllen = len(rl)
2597
2601
2598 node0 = rl.node(0)
2602 node0 = rl.node(0)
2599 node25 = rl.node(rllen // 4)
2603 node25 = rl.node(rllen // 4)
2600 node50 = rl.node(rllen // 2)
2604 node50 = rl.node(rllen // 2)
2601 node75 = rl.node(rllen // 4 * 3)
2605 node75 = rl.node(rllen // 4 * 3)
2602 node100 = rl.node(rllen - 1)
2606 node100 = rl.node(rllen - 1)
2603
2607
2604 allrevs = range(rllen)
2608 allrevs = range(rllen)
2605 allrevsrev = list(reversed(allrevs))
2609 allrevsrev = list(reversed(allrevs))
2606 allnodes = [rl.node(rev) for rev in range(rllen)]
2610 allnodes = [rl.node(rev) for rev in range(rllen)]
2607 allnodesrev = list(reversed(allnodes))
2611 allnodesrev = list(reversed(allnodes))
2608
2612
2609 def constructor():
2613 def constructor():
2610 revlog.revlog(opener, indexfile)
2614 revlog.revlog(opener, indexfile)
2611
2615
2612 def read():
2616 def read():
2613 with opener(indexfile) as fh:
2617 with opener(indexfile) as fh:
2614 fh.read()
2618 fh.read()
2615
2619
2616 def parseindex():
2620 def parseindex():
2617 revlogio.parseindex(data, inline)
2621 revlogio.parseindex(data, inline)
2618
2622
2619 def getentry(revornode):
2623 def getentry(revornode):
2620 index = revlogio.parseindex(data, inline)[0]
2624 index = revlogio.parseindex(data, inline)[0]
2621 index[revornode]
2625 index[revornode]
2622
2626
2623 def getentries(revs, count=1):
2627 def getentries(revs, count=1):
2624 index = revlogio.parseindex(data, inline)[0]
2628 index = revlogio.parseindex(data, inline)[0]
2625
2629
2626 for i in range(count):
2630 for i in range(count):
2627 for rev in revs:
2631 for rev in revs:
2628 index[rev]
2632 index[rev]
2629
2633
2630 def resolvenode(node):
2634 def resolvenode(node):
2631 index = revlogio.parseindex(data, inline)[0]
2635 index = revlogio.parseindex(data, inline)[0]
2632 rev = getattr(index, 'rev', None)
2636 rev = getattr(index, 'rev', None)
2633 if rev is None:
2637 if rev is None:
2634 nodemap = getattr(
2638 nodemap = getattr(
2635 revlogio.parseindex(data, inline)[0], 'nodemap', None
2639 revlogio.parseindex(data, inline)[0], 'nodemap', None
2636 )
2640 )
2637 # This only works for the C code.
2641 # This only works for the C code.
2638 if nodemap is None:
2642 if nodemap is None:
2639 return
2643 return
2640 rev = nodemap.__getitem__
2644 rev = nodemap.__getitem__
2641
2645
2642 try:
2646 try:
2643 rev(node)
2647 rev(node)
2644 except error.RevlogError:
2648 except error.RevlogError:
2645 pass
2649 pass
2646
2650
2647 def resolvenodes(nodes, count=1):
2651 def resolvenodes(nodes, count=1):
2648 index = revlogio.parseindex(data, inline)[0]
2652 index = revlogio.parseindex(data, inline)[0]
2649 rev = getattr(index, 'rev', None)
2653 rev = getattr(index, 'rev', None)
2650 if rev is None:
2654 if rev is None:
2651 nodemap = getattr(
2655 nodemap = getattr(
2652 revlogio.parseindex(data, inline)[0], 'nodemap', None
2656 revlogio.parseindex(data, inline)[0], 'nodemap', None
2653 )
2657 )
2654 # This only works for the C code.
2658 # This only works for the C code.
2655 if nodemap is None:
2659 if nodemap is None:
2656 return
2660 return
2657 rev = nodemap.__getitem__
2661 rev = nodemap.__getitem__
2658
2662
2659 for i in range(count):
2663 for i in range(count):
2660 for node in nodes:
2664 for node in nodes:
2661 try:
2665 try:
2662 rev(node)
2666 rev(node)
2663 except error.RevlogError:
2667 except error.RevlogError:
2664 pass
2668 pass
2665
2669
2666 benches = [
2670 benches = [
2667 (constructor, b'revlog constructor'),
2671 (constructor, b'revlog constructor'),
2668 (read, b'read'),
2672 (read, b'read'),
2669 (parseindex, b'create index object'),
2673 (parseindex, b'create index object'),
2670 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2674 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2671 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2675 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2672 (lambda: resolvenode(node0), b'look up node at rev 0'),
2676 (lambda: resolvenode(node0), b'look up node at rev 0'),
2673 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2677 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2674 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2678 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2675 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2679 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2676 (lambda: resolvenode(node100), b'look up node at tip'),
2680 (lambda: resolvenode(node100), b'look up node at tip'),
2677 # 2x variation is to measure caching impact.
2681 # 2x variation is to measure caching impact.
2678 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2682 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2679 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2683 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2680 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2684 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2681 (
2685 (
2682 lambda: resolvenodes(allnodesrev, 2),
2686 lambda: resolvenodes(allnodesrev, 2),
2683 b'look up all nodes 2x (reverse)',
2687 b'look up all nodes 2x (reverse)',
2684 ),
2688 ),
2685 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2689 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2686 (
2690 (
2687 lambda: getentries(allrevs, 2),
2691 lambda: getentries(allrevs, 2),
2688 b'retrieve all index entries 2x (forward)',
2692 b'retrieve all index entries 2x (forward)',
2689 ),
2693 ),
2690 (
2694 (
2691 lambda: getentries(allrevsrev),
2695 lambda: getentries(allrevsrev),
2692 b'retrieve all index entries (reverse)',
2696 b'retrieve all index entries (reverse)',
2693 ),
2697 ),
2694 (
2698 (
2695 lambda: getentries(allrevsrev, 2),
2699 lambda: getentries(allrevsrev, 2),
2696 b'retrieve all index entries 2x (reverse)',
2700 b'retrieve all index entries 2x (reverse)',
2697 ),
2701 ),
2698 ]
2702 ]
2699
2703
2700 for fn, title in benches:
2704 for fn, title in benches:
2701 timer, fm = gettimer(ui, opts)
2705 timer, fm = gettimer(ui, opts)
2702 timer(fn, title=title)
2706 timer(fn, title=title)
2703 fm.end()
2707 fm.end()
2704
2708
2705
2709
2706 @command(
2710 @command(
2707 b'perf--revlogrevisions',
2711 b'perf::revlogrevisions|perfrevlogrevisions',
2708 revlogopts
2712 revlogopts
2709 + formatteropts
2713 + formatteropts
2710 + [
2714 + [
2711 (b'd', b'dist', 100, b'distance between the revisions'),
2715 (b'd', b'dist', 100, b'distance between the revisions'),
2712 (b's', b'startrev', 0, b'revision to start reading at'),
2716 (b's', b'startrev', 0, b'revision to start reading at'),
2713 (b'', b'reverse', False, b'read in reverse'),
2717 (b'', b'reverse', False, b'read in reverse'),
2714 ],
2718 ],
2715 b'-c|-m|FILE',
2719 b'-c|-m|FILE',
2716 )
2720 )
2717 def perfrevlogrevisions(
2721 def perfrevlogrevisions(
2718 ui, repo, file_=None, startrev=0, reverse=False, **opts
2722 ui, repo, file_=None, startrev=0, reverse=False, **opts
2719 ):
2723 ):
2720 """Benchmark reading a series of revisions from a revlog.
2724 """Benchmark reading a series of revisions from a revlog.
2721
2725
2722 By default, we read every ``-d/--dist`` revision from 0 to tip of
2726 By default, we read every ``-d/--dist`` revision from 0 to tip of
2723 the specified revlog.
2727 the specified revlog.
2724
2728
2725 The start revision can be defined via ``-s/--startrev``.
2729 The start revision can be defined via ``-s/--startrev``.
2726 """
2730 """
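    # Hedged usage sketch (editorial addition, values illustrative only):
    #   $ hg perf::revlogrevisions -m --dist 10
    #   $ hg perfrevlogrevisions -c --startrev 1000 --reverse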
2727 opts = _byteskwargs(opts)
2731 opts = _byteskwargs(opts)
2728
2732
2729 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2733 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2730 rllen = getlen(ui)(rl)
2734 rllen = getlen(ui)(rl)
2731
2735
2732 if startrev < 0:
2736 if startrev < 0:
2733 startrev = rllen + startrev
2737 startrev = rllen + startrev
2734
2738
2735 def d():
2739 def d():
2736 rl.clearcaches()
2740 rl.clearcaches()
2737
2741
2738 beginrev = startrev
2742 beginrev = startrev
2739 endrev = rllen
2743 endrev = rllen
2740 dist = opts[b'dist']
2744 dist = opts[b'dist']
2741
2745
2742 if reverse:
2746 if reverse:
2743 beginrev, endrev = endrev - 1, beginrev - 1
2747 beginrev, endrev = endrev - 1, beginrev - 1
2744 dist = -1 * dist
2748 dist = -1 * dist
2745
2749
2746 for x in _xrange(beginrev, endrev, dist):
2750 for x in _xrange(beginrev, endrev, dist):
2747 # Old revisions don't support passing int.
2751 # Old revisions don't support passing int.
2748 n = rl.node(x)
2752 n = rl.node(x)
2749 rl.revision(n)
2753 rl.revision(n)
2750
2754
2751 timer, fm = gettimer(ui, opts)
2755 timer, fm = gettimer(ui, opts)
2752 timer(d)
2756 timer(d)
2753 fm.end()
2757 fm.end()
2754
2758
2755
2759
2756 @command(
2760 @command(
2757 b'perf--revlogwrite',
2761 b'perf::revlogwrite|perfrevlogwrite',
2758 revlogopts
2762 revlogopts
2759 + formatteropts
2763 + formatteropts
2760 + [
2764 + [
2761 (b's', b'startrev', 1000, b'revision to start writing at'),
2765 (b's', b'startrev', 1000, b'revision to start writing at'),
2762 (b'', b'stoprev', -1, b'last revision to write'),
2766 (b'', b'stoprev', -1, b'last revision to write'),
2763 (b'', b'count', 3, b'number of passes to perform'),
2767 (b'', b'count', 3, b'number of passes to perform'),
2764 (b'', b'details', False, b'print timing for every revisions tested'),
2768 (b'', b'details', False, b'print timing for every revisions tested'),
2765 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
2769 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
2766 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2770 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2767 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2771 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2768 ],
2772 ],
2769 b'-c|-m|FILE',
2773 b'-c|-m|FILE',
2770 )
2774 )
2771 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2775 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2772 """Benchmark writing a series of revisions to a revlog.
2776 """Benchmark writing a series of revisions to a revlog.
2773
2777
2774 Possible source values are:
2778 Possible source values are:
2775 * `full`: add from a full text (default).
2779 * `full`: add from a full text (default).
2776 * `parent-1`: add from a delta to the first parent
2780 * `parent-1`: add from a delta to the first parent
2777 * `parent-2`: add from a delta to the second parent if it exists
2781 * `parent-2`: add from a delta to the second parent if it exists
2778 (use a delta from the first parent otherwise)
2782 (use a delta from the first parent otherwise)
2779 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2783 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2780 * `storage`: add from the existing precomputed deltas
2784 * `storage`: add from the existing precomputed deltas
2781
2785
2782 Note: This performance command measures performance in a custom way. As a
2786 Note: This performance command measures performance in a custom way. As a
2783 result some of the global configuration of the 'perf' command does not
2787 result some of the global configuration of the 'perf' command does not
2784 apply to it:
2788 apply to it:
2785
2789
2786 * ``pre-run``: disabled
2790 * ``pre-run``: disabled
2787
2791
2788 * ``profile-benchmark``: disabled
2792 * ``profile-benchmark``: disabled
2789
2793
2790 * ``run-limits``: disabled, use --count instead
2794 * ``run-limits``: disabled, use --count instead
2791 """
2795 """
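    # Hedged usage sketch (editorial addition): the ``--source`` values are the
    # ones documented above; revision bounds are illustrative only.
    #   $ hg perf::revlogwrite -m --source parent-smallest --count 3
    #   $ hg perfrevlogwrite -c --startrev 1000 --stoprev 2000 --details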
2792 opts = _byteskwargs(opts)
2796 opts = _byteskwargs(opts)
2793
2797
2794 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2798 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2795 rllen = getlen(ui)(rl)
2799 rllen = getlen(ui)(rl)
2796 if startrev < 0:
2800 if startrev < 0:
2797 startrev = rllen + startrev
2801 startrev = rllen + startrev
2798 if stoprev < 0:
2802 if stoprev < 0:
2799 stoprev = rllen + stoprev
2803 stoprev = rllen + stoprev
2800
2804
2801 lazydeltabase = opts['lazydeltabase']
2805 lazydeltabase = opts['lazydeltabase']
2802 source = opts['source']
2806 source = opts['source']
2803 clearcaches = opts['clear_caches']
2807 clearcaches = opts['clear_caches']
2804 validsource = (
2808 validsource = (
2805 b'full',
2809 b'full',
2806 b'parent-1',
2810 b'parent-1',
2807 b'parent-2',
2811 b'parent-2',
2808 b'parent-smallest',
2812 b'parent-smallest',
2809 b'storage',
2813 b'storage',
2810 )
2814 )
2811 if source not in validsource:
2815 if source not in validsource:
2812 raise error.Abort('invalid source type: %s' % source)
2816 raise error.Abort('invalid source type: %s' % source)
2813
2817
2814 ### actually gather results
2818 ### actually gather results
2815 count = opts['count']
2819 count = opts['count']
2816 if count <= 0:
2820 if count <= 0:
2817 raise error.Abort('invalid run count: %d' % count)
2821 raise error.Abort('invalid run count: %d' % count)
2818 allresults = []
2822 allresults = []
2819 for c in range(count):
2823 for c in range(count):
2820 timing = _timeonewrite(
2824 timing = _timeonewrite(
2821 ui,
2825 ui,
2822 rl,
2826 rl,
2823 source,
2827 source,
2824 startrev,
2828 startrev,
2825 stoprev,
2829 stoprev,
2826 c + 1,
2830 c + 1,
2827 lazydeltabase=lazydeltabase,
2831 lazydeltabase=lazydeltabase,
2828 clearcaches=clearcaches,
2832 clearcaches=clearcaches,
2829 )
2833 )
2830 allresults.append(timing)
2834 allresults.append(timing)
2831
2835
2832 ### consolidate the results in a single list
2836 ### consolidate the results in a single list
2833 results = []
2837 results = []
2834 for idx, (rev, t) in enumerate(allresults[0]):
2838 for idx, (rev, t) in enumerate(allresults[0]):
2835 ts = [t]
2839 ts = [t]
2836 for other in allresults[1:]:
2840 for other in allresults[1:]:
2837 orev, ot = other[idx]
2841 orev, ot = other[idx]
2838 assert orev == rev
2842 assert orev == rev
2839 ts.append(ot)
2843 ts.append(ot)
2840 results.append((rev, ts))
2844 results.append((rev, ts))
2841 resultcount = len(results)
2845 resultcount = len(results)
2842
2846
2843 ### Compute and display relevant statistics
2847 ### Compute and display relevant statistics
2844
2848
2845 # get a formatter
2849 # get a formatter
2846 fm = ui.formatter(b'perf', opts)
2850 fm = ui.formatter(b'perf', opts)
2847 displayall = ui.configbool(b"perf", b"all-timing", False)
2851 displayall = ui.configbool(b"perf", b"all-timing", False)
2848
2852
2849 # print individual details if requested
2853 # print individual details if requested
2850 if opts['details']:
2854 if opts['details']:
2851 for idx, item in enumerate(results, 1):
2855 for idx, item in enumerate(results, 1):
2852 rev, data = item
2856 rev, data = item
2853 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2857 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2854 formatone(fm, data, title=title, displayall=displayall)
2858 formatone(fm, data, title=title, displayall=displayall)
2855
2859
2856 # sorts results by median time
2860 # sorts results by median time
2857 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2861 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2858 # list of (name, index) to display
2862 # list of (name, index) to display
2859 relevants = [
2863 relevants = [
2860 ("min", 0),
2864 ("min", 0),
2861 ("10%", resultcount * 10 // 100),
2865 ("10%", resultcount * 10 // 100),
2862 ("25%", resultcount * 25 // 100),
2866 ("25%", resultcount * 25 // 100),
2863 ("50%", resultcount * 50 // 100),
2867 ("50%", resultcount * 50 // 100),
2864 ("75%", resultcount * 75 // 100),
2868 ("75%", resultcount * 75 // 100),
2865 ("90%", resultcount * 90 // 100),
2869 ("90%", resultcount * 90 // 100),
2866 ("95%", resultcount * 95 // 100),
2870 ("95%", resultcount * 95 // 100),
2867 ("99%", resultcount * 99 // 100),
2871 ("99%", resultcount * 99 // 100),
2868 ("99.9%", resultcount * 999 // 1000),
2872 ("99.9%", resultcount * 999 // 1000),
2869 ("99.99%", resultcount * 9999 // 10000),
2873 ("99.99%", resultcount * 9999 // 10000),
2870 ("99.999%", resultcount * 99999 // 100000),
2874 ("99.999%", resultcount * 99999 // 100000),
2871 ("max", -1),
2875 ("max", -1),
2872 ]
2876 ]
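    # Worked example (editorial addition): the indices above select entries from
    # the `results` list sorted by median time just before, so with
    # resultcount == 200 the "90%" entry is results[200 * 90 // 100], i.e.
    # results[180], and "max" is results[-1].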
2873 if not ui.quiet:
2877 if not ui.quiet:
2874 for name, idx in relevants:
2878 for name, idx in relevants:
2875 data = results[idx]
2879 data = results[idx]
2876 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2880 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2877 formatone(fm, data[1], title=title, displayall=displayall)
2881 formatone(fm, data[1], title=title, displayall=displayall)
2878
2882
2879 # XXX summing that many float will not be very precise, we ignore this fact
2883 # XXX summing that many float will not be very precise, we ignore this fact
2880 # for now
2884 # for now
2881 totaltime = []
2885 totaltime = []
2882 for item in allresults:
2886 for item in allresults:
2883 totaltime.append(
2887 totaltime.append(
2884 (
2888 (
2885 sum(x[1][0] for x in item),
2889 sum(x[1][0] for x in item),
2886 sum(x[1][1] for x in item),
2890 sum(x[1][1] for x in item),
2887 sum(x[1][2] for x in item),
2891 sum(x[1][2] for x in item),
2888 )
2892 )
2889 )
2893 )
2890 formatone(
2894 formatone(
2891 fm,
2895 fm,
2892 totaltime,
2896 totaltime,
2893 title="total time (%d revs)" % resultcount,
2897 title="total time (%d revs)" % resultcount,
2894 displayall=displayall,
2898 displayall=displayall,
2895 )
2899 )
2896 fm.end()
2900 fm.end()
2897
2901
2898
2902
2899 class _faketr(object):
2903 class _faketr(object):
2900 def add(s, x, y, z=None):
2904 def add(s, x, y, z=None):
2901 return None
2905 return None
2902
2906
2903
2907
2904 def _timeonewrite(
2908 def _timeonewrite(
2905 ui,
2909 ui,
2906 orig,
2910 orig,
2907 source,
2911 source,
2908 startrev,
2912 startrev,
2909 stoprev,
2913 stoprev,
2910 runidx=None,
2914 runidx=None,
2911 lazydeltabase=True,
2915 lazydeltabase=True,
2912 clearcaches=True,
2916 clearcaches=True,
2913 ):
2917 ):
2914 timings = []
2918 timings = []
2915 tr = _faketr()
2919 tr = _faketr()
2916 with _temprevlog(ui, orig, startrev) as dest:
2920 with _temprevlog(ui, orig, startrev) as dest:
2917 dest._lazydeltabase = lazydeltabase
2921 dest._lazydeltabase = lazydeltabase
2918 revs = list(orig.revs(startrev, stoprev))
2922 revs = list(orig.revs(startrev, stoprev))
2919 total = len(revs)
2923 total = len(revs)
2920 topic = 'adding'
2924 topic = 'adding'
2921 if runidx is not None:
2925 if runidx is not None:
2922 topic += ' (run #%d)' % runidx
2926 topic += ' (run #%d)' % runidx
2923 # Support both old and new progress API
2927 # Support both old and new progress API
2924 if util.safehasattr(ui, 'makeprogress'):
2928 if util.safehasattr(ui, 'makeprogress'):
2925 progress = ui.makeprogress(topic, unit='revs', total=total)
2929 progress = ui.makeprogress(topic, unit='revs', total=total)
2926
2930
2927 def updateprogress(pos):
2931 def updateprogress(pos):
2928 progress.update(pos)
2932 progress.update(pos)
2929
2933
2930 def completeprogress():
2934 def completeprogress():
2931 progress.complete()
2935 progress.complete()
2932
2936
2933 else:
2937 else:
2934
2938
2935 def updateprogress(pos):
2939 def updateprogress(pos):
2936 ui.progress(topic, pos, unit='revs', total=total)
2940 ui.progress(topic, pos, unit='revs', total=total)
2937
2941
2938 def completeprogress():
2942 def completeprogress():
2939 ui.progress(topic, None, unit='revs', total=total)
2943 ui.progress(topic, None, unit='revs', total=total)
2940
2944
2941 for idx, rev in enumerate(revs):
2945 for idx, rev in enumerate(revs):
2942 updateprogress(idx)
2946 updateprogress(idx)
2943 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2947 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2944 if clearcaches:
2948 if clearcaches:
2945 dest.index.clearcaches()
2949 dest.index.clearcaches()
2946 dest.clearcaches()
2950 dest.clearcaches()
2947 with timeone() as r:
2951 with timeone() as r:
2948 dest.addrawrevision(*addargs, **addkwargs)
2952 dest.addrawrevision(*addargs, **addkwargs)
2949 timings.append((rev, r[0]))
2953 timings.append((rev, r[0]))
2950 updateprogress(total)
2954 updateprogress(total)
2951 completeprogress()
2955 completeprogress()
2952 return timings
2956 return timings
2953
2957
2954
2958
2955 def _getrevisionseed(orig, rev, tr, source):
2959 def _getrevisionseed(orig, rev, tr, source):
2956 from mercurial.node import nullid
2960 from mercurial.node import nullid
2957
2961
2958 linkrev = orig.linkrev(rev)
2962 linkrev = orig.linkrev(rev)
2959 node = orig.node(rev)
2963 node = orig.node(rev)
2960 p1, p2 = orig.parents(node)
2964 p1, p2 = orig.parents(node)
2961 flags = orig.flags(rev)
2965 flags = orig.flags(rev)
2962 cachedelta = None
2966 cachedelta = None
2963 text = None
2967 text = None
2964
2968
2965 if source == b'full':
2969 if source == b'full':
2966 text = orig.revision(rev)
2970 text = orig.revision(rev)
2967 elif source == b'parent-1':
2971 elif source == b'parent-1':
2968 baserev = orig.rev(p1)
2972 baserev = orig.rev(p1)
2969 cachedelta = (baserev, orig.revdiff(p1, rev))
2973 cachedelta = (baserev, orig.revdiff(p1, rev))
2970 elif source == b'parent-2':
2974 elif source == b'parent-2':
2971 parent = p2
2975 parent = p2
2972 if p2 == nullid:
2976 if p2 == nullid:
2973 parent = p1
2977 parent = p1
2974 baserev = orig.rev(parent)
2978 baserev = orig.rev(parent)
2975 cachedelta = (baserev, orig.revdiff(parent, rev))
2979 cachedelta = (baserev, orig.revdiff(parent, rev))
2976 elif source == b'parent-smallest':
2980 elif source == b'parent-smallest':
2977 p1diff = orig.revdiff(p1, rev)
2981 p1diff = orig.revdiff(p1, rev)
2978 parent = p1
2982 parent = p1
2979 diff = p1diff
2983 diff = p1diff
2980 if p2 != nullid:
2984 if p2 != nullid:
2981 p2diff = orig.revdiff(p2, rev)
2985 p2diff = orig.revdiff(p2, rev)
2982 if len(p1diff) > len(p2diff):
2986 if len(p1diff) > len(p2diff):
2983 parent = p2
2987 parent = p2
2984 diff = p2diff
2988 diff = p2diff
2985 baserev = orig.rev(parent)
2989 baserev = orig.rev(parent)
2986 cachedelta = (baserev, diff)
2990 cachedelta = (baserev, diff)
2987 elif source == b'storage':
2991 elif source == b'storage':
2988 baserev = orig.deltaparent(rev)
2992 baserev = orig.deltaparent(rev)
2989 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2993 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2990
2994
2991 return (
2995 return (
2992 (text, tr, linkrev, p1, p2),
2996 (text, tr, linkrev, p1, p2),
2993 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
2997 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
2994 )
2998 )
2995
2999
2996
3000
2997 @contextlib.contextmanager
3001 @contextlib.contextmanager
2998 def _temprevlog(ui, orig, truncaterev):
3002 def _temprevlog(ui, orig, truncaterev):
2999 from mercurial import vfs as vfsmod
3003 from mercurial import vfs as vfsmod
3000
3004
3001 if orig._inline:
3005 if orig._inline:
3002 raise error.Abort('not supporting inline revlog (yet)')
3006 raise error.Abort('not supporting inline revlog (yet)')
3003 revlogkwargs = {}
3007 revlogkwargs = {}
3004 k = 'upperboundcomp'
3008 k = 'upperboundcomp'
3005 if util.safehasattr(orig, k):
3009 if util.safehasattr(orig, k):
3006 revlogkwargs[k] = getattr(orig, k)
3010 revlogkwargs[k] = getattr(orig, k)
3007
3011
3008 origindexpath = orig.opener.join(orig.indexfile)
3012 origindexpath = orig.opener.join(orig.indexfile)
3009 origdatapath = orig.opener.join(orig.datafile)
3013 origdatapath = orig.opener.join(orig.datafile)
3010 indexname = 'revlog.i'
3014 indexname = 'revlog.i'
3011 dataname = 'revlog.d'
3015 dataname = 'revlog.d'
3012
3016
3013 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3017 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3014 try:
3018 try:
3015 # copy the data file in a temporary directory
3019 # copy the data file in a temporary directory
3016 ui.debug('copying data in %s\n' % tmpdir)
3020 ui.debug('copying data in %s\n' % tmpdir)
3017 destindexpath = os.path.join(tmpdir, 'revlog.i')
3021 destindexpath = os.path.join(tmpdir, 'revlog.i')
3018 destdatapath = os.path.join(tmpdir, 'revlog.d')
3022 destdatapath = os.path.join(tmpdir, 'revlog.d')
3019 shutil.copyfile(origindexpath, destindexpath)
3023 shutil.copyfile(origindexpath, destindexpath)
3020 shutil.copyfile(origdatapath, destdatapath)
3024 shutil.copyfile(origdatapath, destdatapath)
3021
3025
3022 # remove the data we want to add again
3026 # remove the data we want to add again
3023 ui.debug('truncating data to be rewritten\n')
3027 ui.debug('truncating data to be rewritten\n')
3024 with open(destindexpath, 'ab') as index:
3028 with open(destindexpath, 'ab') as index:
3025 index.seek(0)
3029 index.seek(0)
3026 index.truncate(truncaterev * orig._io.size)
3030 index.truncate(truncaterev * orig._io.size)
3027 with open(destdatapath, 'ab') as data:
3031 with open(destdatapath, 'ab') as data:
3028 data.seek(0)
3032 data.seek(0)
3029 data.truncate(orig.start(truncaterev))
3033 data.truncate(orig.start(truncaterev))
3030
3034
3031 # instantiate a new revlog from the temporary copy
3035 # instantiate a new revlog from the temporary copy
3032 ui.debug('truncating adding to be rewritten\n')
3036 ui.debug('truncating adding to be rewritten\n')
3033 vfs = vfsmod.vfs(tmpdir)
3037 vfs = vfsmod.vfs(tmpdir)
3034 vfs.options = getattr(orig.opener, 'options', None)
3038 vfs.options = getattr(orig.opener, 'options', None)
3035
3039
3036 dest = revlog.revlog(
3040 dest = revlog.revlog(
3037 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3041 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3038 )
3042 )
3039 if dest._inline:
3043 if dest._inline:
3040 raise error.Abort('not supporting inline revlog (yet)')
3044 raise error.Abort('not supporting inline revlog (yet)')
3041 # make sure internals are initialized
3045 # make sure internals are initialized
3042 dest.revision(len(dest) - 1)
3046 dest.revision(len(dest) - 1)
3043 yield dest
3047 yield dest
3044 del dest, vfs
3048 del dest, vfs
3045 finally:
3049 finally:
3046 shutil.rmtree(tmpdir, True)
3050 shutil.rmtree(tmpdir, True)
3047
3051
3048
3052
3049 @command(
3053 @command(
3050 b'perf--revlogchunks',
3054 b'perf::revlogchunks|perfrevlogchunks',
3051 revlogopts
3055 revlogopts
3052 + formatteropts
3056 + formatteropts
3053 + [
3057 + [
3054 (b'e', b'engines', b'', b'compression engines to use'),
3058 (b'e', b'engines', b'', b'compression engines to use'),
3055 (b's', b'startrev', 0, b'revision to start at'),
3059 (b's', b'startrev', 0, b'revision to start at'),
3056 ],
3060 ],
3057 b'-c|-m|FILE',
3061 b'-c|-m|FILE',
3058 )
3062 )
3059 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3063 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3060 """Benchmark operations on revlog chunks.
3064 """Benchmark operations on revlog chunks.
3061
3065
3062 Logically, each revlog is a collection of fulltext revisions. However,
3066 Logically, each revlog is a collection of fulltext revisions. However,
3063 stored within each revlog are "chunks" of possibly compressed data. This
3067 stored within each revlog are "chunks" of possibly compressed data. This
3064 data needs to be read and decompressed or compressed and written.
3068 data needs to be read and decompressed or compressed and written.
3065
3069
3066 This command measures the time it takes to read+decompress and recompress
3070 This command measures the time it takes to read+decompress and recompress
3067 chunks in a revlog. It effectively isolates I/O and compression performance.
3071 chunks in a revlog. It effectively isolates I/O and compression performance.
3068 For measurements of higher-level operations like resolving revisions,
3072 For measurements of higher-level operations like resolving revisions,
3069 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3073 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3070 """
3074 """
3071 opts = _byteskwargs(opts)
3075 opts = _byteskwargs(opts)
3072
3076
3073 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3077 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3074
3078
3075 # _chunkraw was renamed to _getsegmentforrevs.
3079 # _chunkraw was renamed to _getsegmentforrevs.
3076 try:
3080 try:
3077 segmentforrevs = rl._getsegmentforrevs
3081 segmentforrevs = rl._getsegmentforrevs
3078 except AttributeError:
3082 except AttributeError:
3079 segmentforrevs = rl._chunkraw
3083 segmentforrevs = rl._chunkraw
3080
3084
3081 # Verify engines argument.
3085 # Verify engines argument.
3082 if engines:
3086 if engines:
3083 engines = {e.strip() for e in engines.split(b',')}
3087 engines = {e.strip() for e in engines.split(b',')}
3084 for engine in engines:
3088 for engine in engines:
3085 try:
3089 try:
3086 util.compressionengines[engine]
3090 util.compressionengines[engine]
3087 except KeyError:
3091 except KeyError:
3088 raise error.Abort(b'unknown compression engine: %s' % engine)
3092 raise error.Abort(b'unknown compression engine: %s' % engine)
3089 else:
3093 else:
3090 engines = []
3094 engines = []
3091 for e in util.compengines:
3095 for e in util.compengines:
3092 engine = util.compengines[e]
3096 engine = util.compengines[e]
3093 try:
3097 try:
3094 if engine.available():
3098 if engine.available():
3095 engine.revlogcompressor().compress(b'dummy')
3099 engine.revlogcompressor().compress(b'dummy')
3096 engines.append(e)
3100 engines.append(e)
3097 except NotImplementedError:
3101 except NotImplementedError:
3098 pass
3102 pass
3099
3103
3100 revs = list(rl.revs(startrev, len(rl) - 1))
3104 revs = list(rl.revs(startrev, len(rl) - 1))
3101
3105
3102 def rlfh(rl):
3106 def rlfh(rl):
3103 if rl._inline:
3107 if rl._inline:
3104 return getsvfs(repo)(rl.indexfile)
3108 return getsvfs(repo)(rl.indexfile)
3105 else:
3109 else:
3106 return getsvfs(repo)(rl.datafile)
3110 return getsvfs(repo)(rl.datafile)
3107
3111
3108 def doread():
3112 def doread():
3109 rl.clearcaches()
3113 rl.clearcaches()
3110 for rev in revs:
3114 for rev in revs:
3111 segmentforrevs(rev, rev)
3115 segmentforrevs(rev, rev)
3112
3116
3113 def doreadcachedfh():
3117 def doreadcachedfh():
3114 rl.clearcaches()
3118 rl.clearcaches()
3115 fh = rlfh(rl)
3119 fh = rlfh(rl)
3116 for rev in revs:
3120 for rev in revs:
3117 segmentforrevs(rev, rev, df=fh)
3121 segmentforrevs(rev, rev, df=fh)
3118
3122
3119 def doreadbatch():
3123 def doreadbatch():
3120 rl.clearcaches()
3124 rl.clearcaches()
3121 segmentforrevs(revs[0], revs[-1])
3125 segmentforrevs(revs[0], revs[-1])
3122
3126
3123 def doreadbatchcachedfh():
3127 def doreadbatchcachedfh():
3124 rl.clearcaches()
3128 rl.clearcaches()
3125 fh = rlfh(rl)
3129 fh = rlfh(rl)
3126 segmentforrevs(revs[0], revs[-1], df=fh)
3130 segmentforrevs(revs[0], revs[-1], df=fh)
3127
3131
3128 def dochunk():
3132 def dochunk():
3129 rl.clearcaches()
3133 rl.clearcaches()
3130 fh = rlfh(rl)
3134 fh = rlfh(rl)
3131 for rev in revs:
3135 for rev in revs:
3132 rl._chunk(rev, df=fh)
3136 rl._chunk(rev, df=fh)
3133
3137
3134 chunks = [None]
3138 chunks = [None]
3135
3139
3136 def dochunkbatch():
3140 def dochunkbatch():
3137 rl.clearcaches()
3141 rl.clearcaches()
3138 fh = rlfh(rl)
3142 fh = rlfh(rl)
3139 # Save chunks as a side-effect.
3143 # Save chunks as a side-effect.
3140 chunks[0] = rl._chunks(revs, df=fh)
3144 chunks[0] = rl._chunks(revs, df=fh)
3141
3145
3142 def docompress(compressor):
3146 def docompress(compressor):
3143 rl.clearcaches()
3147 rl.clearcaches()
3144
3148
3145 try:
3149 try:
3146 # Swap in the requested compression engine.
3150 # Swap in the requested compression engine.
3147 oldcompressor = rl._compressor
3151 oldcompressor = rl._compressor
3148 rl._compressor = compressor
3152 rl._compressor = compressor
3149 for chunk in chunks[0]:
3153 for chunk in chunks[0]:
3150 rl.compress(chunk)
3154 rl.compress(chunk)
3151 finally:
3155 finally:
3152 rl._compressor = oldcompressor
3156 rl._compressor = oldcompressor
3153
3157
3154 benches = [
3158 benches = [
3155 (lambda: doread(), b'read'),
3159 (lambda: doread(), b'read'),
3156 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3160 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3157 (lambda: doreadbatch(), b'read batch'),
3161 (lambda: doreadbatch(), b'read batch'),
3158 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3162 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3159 (lambda: dochunk(), b'chunk'),
3163 (lambda: dochunk(), b'chunk'),
3160 (lambda: dochunkbatch(), b'chunk batch'),
3164 (lambda: dochunkbatch(), b'chunk batch'),
3161 ]
3165 ]
3162
3166
3163 for engine in sorted(engines):
3167 for engine in sorted(engines):
3164 compressor = util.compengines[engine].revlogcompressor()
3168 compressor = util.compengines[engine].revlogcompressor()
3165 benches.append(
3169 benches.append(
3166 (
3170 (
3167 functools.partial(docompress, compressor),
3171 functools.partial(docompress, compressor),
3168 b'compress w/ %s' % engine,
3172 b'compress w/ %s' % engine,
3169 )
3173 )
3170 )
3174 )
3171
3175
3172 for fn, title in benches:
3176 for fn, title in benches:
3173 timer, fm = gettimer(ui, opts)
3177 timer, fm = gettimer(ui, opts)
3174 timer(fn, title=title)
3178 timer(fn, title=title)
3175 fm.end()
3179 fm.end()
3176
3180
3177
3181
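# Editor's note: an illustrative sketch, not part of perf.py. It spells out
# what the 'read'/'chunk' benchmarks of perfrevlogchunks() above measure,
# using only the standard library timer instead of the perf harness. The
# revlog `rl` is assumed to be one already opened via cmdutil.openrevlog(),
# as at the top of perfrevlogchunks().
def _sketch_time_chunk_reads(rl):
    import timeit

    revs = list(rl.revs(0, len(rl) - 1))

    def onepass():
        rl.clearcaches()   # drop cached chunks so real I/O + decompression is measured
        for rev in revs:
            rl._chunk(rev)  # read and decompress one chunk, as dochunk() above does
                            # (minus the reused file handle)

    # best-of-3 wall-clock timing of a full pass over the revlog
    return min(timeit.repeat(onepass, repeat=3, number=1))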
3178 @command(
3182 @command(
3179 b'perf--revlogrevision',
3183 b'perf::revlogrevision|perfrevlogrevision',
3180 revlogopts
3184 revlogopts
3181 + formatteropts
3185 + formatteropts
3182 + [(b'', b'cache', False, b'use caches instead of clearing')],
3186 + [(b'', b'cache', False, b'use caches instead of clearing')],
3183 b'-c|-m|FILE REV',
3187 b'-c|-m|FILE REV',
3184 )
3188 )
3185 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3189 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3186 """Benchmark obtaining a revlog revision.
3190 """Benchmark obtaining a revlog revision.
3187
3191
3188 Obtaining a revlog revision consists of roughly the following steps:
3192 Obtaining a revlog revision consists of roughly the following steps:
3189
3193
3190 1. Compute the delta chain
3194 1. Compute the delta chain
3191 2. Slice the delta chain if applicable
3195 2. Slice the delta chain if applicable
3192 3. Obtain the raw chunks for that delta chain
3196 3. Obtain the raw chunks for that delta chain
3193 4. Decompress each raw chunk
3197 4. Decompress each raw chunk
3194 5. Apply binary patches to obtain fulltext
3198 5. Apply binary patches to obtain fulltext
3195 6. Verify hash of fulltext
3199 6. Verify hash of fulltext
3196
3200
3197 This command measures the time spent in each of these phases.
3201 This command measures the time spent in each of these phases.
3198 """
3202 """
3199 opts = _byteskwargs(opts)
3203 opts = _byteskwargs(opts)
3200
3204
3201 if opts.get(b'changelog') or opts.get(b'manifest'):
3205 if opts.get(b'changelog') or opts.get(b'manifest'):
3202 file_, rev = None, file_
3206 file_, rev = None, file_
3203 elif rev is None:
3207 elif rev is None:
3204 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3208 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3205
3209
3206 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3210 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3207
3211
3208 # _chunkraw was renamed to _getsegmentforrevs.
3212 # _chunkraw was renamed to _getsegmentforrevs.
3209 try:
3213 try:
3210 segmentforrevs = r._getsegmentforrevs
3214 segmentforrevs = r._getsegmentforrevs
3211 except AttributeError:
3215 except AttributeError:
3212 segmentforrevs = r._chunkraw
3216 segmentforrevs = r._chunkraw
3213
3217
3214 node = r.lookup(rev)
3218 node = r.lookup(rev)
3215 rev = r.rev(node)
3219 rev = r.rev(node)
3216
3220
3217 def getrawchunks(data, chain):
3221 def getrawchunks(data, chain):
3218 start = r.start
3222 start = r.start
3219 length = r.length
3223 length = r.length
3220 inline = r._inline
3224 inline = r._inline
3221 iosize = r._io.size
3225 iosize = r._io.size
3222 buffer = util.buffer
3226 buffer = util.buffer
3223
3227
3224 chunks = []
3228 chunks = []
3225 ladd = chunks.append
3229 ladd = chunks.append
3226 for idx, item in enumerate(chain):
3230 for idx, item in enumerate(chain):
3227 offset = start(item[0])
3231 offset = start(item[0])
3228 bits = data[idx]
3232 bits = data[idx]
3229 for rev in item:
3233 for rev in item:
3230 chunkstart = start(rev)
3234 chunkstart = start(rev)
3231 if inline:
3235 if inline:
3232 chunkstart += (rev + 1) * iosize
3236 chunkstart += (rev + 1) * iosize
3233 chunklength = length(rev)
3237 chunklength = length(rev)
3234 ladd(buffer(bits, chunkstart - offset, chunklength))
3238 ladd(buffer(bits, chunkstart - offset, chunklength))
3235
3239
3236 return chunks
3240 return chunks
3237
3241
3238 def dodeltachain(rev):
3242 def dodeltachain(rev):
3239 if not cache:
3243 if not cache:
3240 r.clearcaches()
3244 r.clearcaches()
3241 r._deltachain(rev)
3245 r._deltachain(rev)
3242
3246
3243 def doread(chain):
3247 def doread(chain):
3244 if not cache:
3248 if not cache:
3245 r.clearcaches()
3249 r.clearcaches()
3246 for item in slicedchain:
3250 for item in slicedchain:
3247 segmentforrevs(item[0], item[-1])
3251 segmentforrevs(item[0], item[-1])
3248
3252
3249 def doslice(r, chain, size):
3253 def doslice(r, chain, size):
3250 for s in slicechunk(r, chain, targetsize=size):
3254 for s in slicechunk(r, chain, targetsize=size):
3251 pass
3255 pass
3252
3256
3253 def dorawchunks(data, chain):
3257 def dorawchunks(data, chain):
3254 if not cache:
3258 if not cache:
3255 r.clearcaches()
3259 r.clearcaches()
3256 getrawchunks(data, chain)
3260 getrawchunks(data, chain)
3257
3261
3258 def dodecompress(chunks):
3262 def dodecompress(chunks):
3259 decomp = r.decompress
3263 decomp = r.decompress
3260 for chunk in chunks:
3264 for chunk in chunks:
3261 decomp(chunk)
3265 decomp(chunk)
3262
3266
3263 def dopatch(text, bins):
3267 def dopatch(text, bins):
3264 if not cache:
3268 if not cache:
3265 r.clearcaches()
3269 r.clearcaches()
3266 mdiff.patches(text, bins)
3270 mdiff.patches(text, bins)
3267
3271
3268 def dohash(text):
3272 def dohash(text):
3269 if not cache:
3273 if not cache:
3270 r.clearcaches()
3274 r.clearcaches()
3271 r.checkhash(text, node, rev=rev)
3275 r.checkhash(text, node, rev=rev)
3272
3276
3273 def dorevision():
3277 def dorevision():
3274 if not cache:
3278 if not cache:
3275 r.clearcaches()
3279 r.clearcaches()
3276 r.revision(node)
3280 r.revision(node)
3277
3281
3278 try:
3282 try:
3279 from mercurial.revlogutils.deltas import slicechunk
3283 from mercurial.revlogutils.deltas import slicechunk
3280 except ImportError:
3284 except ImportError:
3281 slicechunk = getattr(revlog, '_slicechunk', None)
3285 slicechunk = getattr(revlog, '_slicechunk', None)
3282
3286
3283 size = r.length(rev)
3287 size = r.length(rev)
3284 chain = r._deltachain(rev)[0]
3288 chain = r._deltachain(rev)[0]
3285 if not getattr(r, '_withsparseread', False):
3289 if not getattr(r, '_withsparseread', False):
3286 slicedchain = (chain,)
3290 slicedchain = (chain,)
3287 else:
3291 else:
3288 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3292 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3289 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3293 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3290 rawchunks = getrawchunks(data, slicedchain)
3294 rawchunks = getrawchunks(data, slicedchain)
3291 bins = r._chunks(chain)
3295 bins = r._chunks(chain)
3292 text = bytes(bins[0])
3296 text = bytes(bins[0])
3293 bins = bins[1:]
3297 bins = bins[1:]
3294 text = mdiff.patches(text, bins)
3298 text = mdiff.patches(text, bins)
3295
3299
3296 benches = [
3300 benches = [
3297 (lambda: dorevision(), b'full'),
3301 (lambda: dorevision(), b'full'),
3298 (lambda: dodeltachain(rev), b'deltachain'),
3302 (lambda: dodeltachain(rev), b'deltachain'),
3299 (lambda: doread(chain), b'read'),
3303 (lambda: doread(chain), b'read'),
3300 ]
3304 ]
3301
3305
3302 if getattr(r, '_withsparseread', False):
3306 if getattr(r, '_withsparseread', False):
3303 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3307 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3304 benches.append(slicing)
3308 benches.append(slicing)
3305
3309
3306 benches.extend(
3310 benches.extend(
3307 [
3311 [
3308 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3312 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3309 (lambda: dodecompress(rawchunks), b'decompress'),
3313 (lambda: dodecompress(rawchunks), b'decompress'),
3310 (lambda: dopatch(text, bins), b'patch'),
3314 (lambda: dopatch(text, bins), b'patch'),
3311 (lambda: dohash(text), b'hash'),
3315 (lambda: dohash(text), b'hash'),
3312 ]
3316 ]
3313 )
3317 )
3314
3318
3315 timer, fm = gettimer(ui, opts)
3319 timer, fm = gettimer(ui, opts)
3316 for fn, title in benches:
3320 for fn, title in benches:
3317 timer(fn, title=title)
3321 timer(fn, title=title)
3318 fm.end()
3322 fm.end()
3319
3323
3320
3324
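# Editor's note: an illustrative sketch, not part of perf.py. It lines up
# steps 1 and 3-6 from the perfrevlogrevision() docstring (slicing, step 2,
# is skipped for simplicity). `r` is assumed to be a non-inline revlog and
# `rev` a valid revision, mirroring the variables used in the function above.
def _sketch_reconstruct_fulltext(r, rev):
    from mercurial import mdiff

    chain = r._deltachain(rev)[0]            # 1. compute the delta chain (base first)
    bins = r._chunks(chain)                  # 3+4. read and decompress the raw chunks
    text = bytes(bins[0])                    # the chain base is a fulltext
    text = mdiff.patches(text, bins[1:])     # 5. apply the binary deltas in order
    r.checkhash(text, r.node(rev), rev=rev)  # 6. verify the reconstructed fulltext
    return text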
3321 @command(
3325 @command(
3322 b'perf--revset',
3326 b'perf::revset|perfrevset',
3323 [
3327 [
3324 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3328 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3325 (b'', b'contexts', False, b'obtain changectx for each revision'),
3329 (b'', b'contexts', False, b'obtain changectx for each revision'),
3326 ]
3330 ]
3327 + formatteropts,
3331 + formatteropts,
3328 b"REVSET",
3332 b"REVSET",
3329 )
3333 )
3330 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3334 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3331 """benchmark the execution time of a revset
3335 """benchmark the execution time of a revset
3332
3336
3333 Use the --clear option if you need to evaluate the impact of building the
3337 Use the --clear option if you need to evaluate the impact of building the
3334 volatile revision set caches on revset execution. The volatile caches hold
3338 volatile revision set caches on revset execution. The volatile caches hold
3335 filtered and obsolescence related data."""
3339 filtered and obsolescence related data."""
3336 opts = _byteskwargs(opts)
3340 opts = _byteskwargs(opts)
3337
3341
3338 timer, fm = gettimer(ui, opts)
3342 timer, fm = gettimer(ui, opts)
3339
3343
3340 def d():
3344 def d():
3341 if clear:
3345 if clear:
3342 repo.invalidatevolatilesets()
3346 repo.invalidatevolatilesets()
3343 if contexts:
3347 if contexts:
3344 for ctx in repo.set(expr):
3348 for ctx in repo.set(expr):
3345 pass
3349 pass
3346 else:
3350 else:
3347 for r in repo.revs(expr):
3351 for r in repo.revs(expr):
3348 pass
3352 pass
3349
3353
3350 timer(d)
3354 timer(d)
3351 fm.end()
3355 fm.end()
3352
3356
3353
3357
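# Editor's note: an illustrative sketch, not part of perf.py. It shows the
# two evaluation modes that perfrevset() can benchmark: plain repo.revs()
# yields integer revision numbers, while repo.set() additionally builds a
# changectx per result, which is what the --contexts flag exercises. The
# default expression is only an example.
def _sketch_revset_modes(repo, expr=b'heads(all())'):
    numbers = [r for r in repo.revs(expr)]             # ints only, cheaper
    contexts = [ctx.hex() for ctx in repo.set(expr)]   # full changectx objects
    return numbers, contexts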
3354 @command(
3358 @command(
3355 b'perf--volatilesets',
3359 b'perf::volatilesets|perfvolatilesets',
3356 [
3360 [
3357 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3361 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3358 ]
3362 ]
3359 + formatteropts,
3363 + formatteropts,
3360 )
3364 )
3361 def perfvolatilesets(ui, repo, *names, **opts):
3365 def perfvolatilesets(ui, repo, *names, **opts):
3362 """benchmark the computation of various volatile set
3366 """benchmark the computation of various volatile set
3363
3367
3364 Volatile sets compute elements related to filtering and obsolescence."""
3368 Volatile sets compute elements related to filtering and obsolescence."""
3365 opts = _byteskwargs(opts)
3369 opts = _byteskwargs(opts)
3366 timer, fm = gettimer(ui, opts)
3370 timer, fm = gettimer(ui, opts)
3367 repo = repo.unfiltered()
3371 repo = repo.unfiltered()
3368
3372
3369 def getobs(name):
3373 def getobs(name):
3370 def d():
3374 def d():
3371 repo.invalidatevolatilesets()
3375 repo.invalidatevolatilesets()
3372 if opts[b'clear_obsstore']:
3376 if opts[b'clear_obsstore']:
3373 clearfilecache(repo, b'obsstore')
3377 clearfilecache(repo, b'obsstore')
3374 obsolete.getrevs(repo, name)
3378 obsolete.getrevs(repo, name)
3375
3379
3376 return d
3380 return d
3377
3381
3378 allobs = sorted(obsolete.cachefuncs)
3382 allobs = sorted(obsolete.cachefuncs)
3379 if names:
3383 if names:
3380 allobs = [n for n in allobs if n in names]
3384 allobs = [n for n in allobs if n in names]
3381
3385
3382 for name in allobs:
3386 for name in allobs:
3383 timer(getobs(name), title=name)
3387 timer(getobs(name), title=name)
3384
3388
3385 def getfiltered(name):
3389 def getfiltered(name):
3386 def d():
3390 def d():
3387 repo.invalidatevolatilesets()
3391 repo.invalidatevolatilesets()
3388 if opts[b'clear_obsstore']:
3392 if opts[b'clear_obsstore']:
3389 clearfilecache(repo, b'obsstore')
3393 clearfilecache(repo, b'obsstore')
3390 repoview.filterrevs(repo, name)
3394 repoview.filterrevs(repo, name)
3391
3395
3392 return d
3396 return d
3393
3397
3394 allfilter = sorted(repoview.filtertable)
3398 allfilter = sorted(repoview.filtertable)
3395 if names:
3399 if names:
3396 allfilter = [n for n in allfilter if n in names]
3400 allfilter = [n for n in allfilter if n in names]
3397
3401
3398 for name in allfilter:
3402 for name in allfilter:
3399 timer(getfiltered(name), title=name)
3403 timer(getfiltered(name), title=name)
3400 fm.end()
3404 fm.end()
3401
3405
3402
3406
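# Editor's note: an illustrative sketch, not part of perf.py. It shows where
# the benchmark titles printed by perfvolatilesets() come from: the
# registered obsolescence set computers and the repoview filter table.
def _sketch_list_volatile_sets(ui):
    from mercurial import obsolete, repoview

    ui.write(b'volatile sets: %s\n' % b', '.join(sorted(obsolete.cachefuncs)))
    ui.write(b'repo filters:  %s\n' % b', '.join(sorted(repoview.filtertable)))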
3403 @command(
3407 @command(
3404 b'perf--branchmap',
3408 b'perf::branchmap|perfbranchmap',
3405 [
3409 [
3406 (b'f', b'full', False, b'Includes build time of subset'),
3410 (b'f', b'full', False, b'Includes build time of subset'),
3407 (
3411 (
3408 b'',
3412 b'',
3409 b'clear-revbranch',
3413 b'clear-revbranch',
3410 False,
3414 False,
3411 b'purge the revbranch cache between computation',
3415 b'purge the revbranch cache between computation',
3412 ),
3416 ),
3413 ]
3417 ]
3414 + formatteropts,
3418 + formatteropts,
3415 )
3419 )
3416 def perfbranchmap(ui, repo, *filternames, **opts):
3420 def perfbranchmap(ui, repo, *filternames, **opts):
3417 """benchmark the update of a branchmap
3421 """benchmark the update of a branchmap
3418
3422
3419 This benchmarks the full repo.branchmap() call with read and write disabled
3423 This benchmarks the full repo.branchmap() call with read and write disabled
3420 """
3424 """
3421 opts = _byteskwargs(opts)
3425 opts = _byteskwargs(opts)
3422 full = opts.get(b"full", False)
3426 full = opts.get(b"full", False)
3423 clear_revbranch = opts.get(b"clear_revbranch", False)
3427 clear_revbranch = opts.get(b"clear_revbranch", False)
3424 timer, fm = gettimer(ui, opts)
3428 timer, fm = gettimer(ui, opts)
3425
3429
3426 def getbranchmap(filtername):
3430 def getbranchmap(filtername):
3427 """generate a benchmark function for the filtername"""
3431 """generate a benchmark function for the filtername"""
3428 if filtername is None:
3432 if filtername is None:
3429 view = repo
3433 view = repo
3430 else:
3434 else:
3431 view = repo.filtered(filtername)
3435 view = repo.filtered(filtername)
3432 if util.safehasattr(view._branchcaches, '_per_filter'):
3436 if util.safehasattr(view._branchcaches, '_per_filter'):
3433 filtered = view._branchcaches._per_filter
3437 filtered = view._branchcaches._per_filter
3434 else:
3438 else:
3435 # older versions
3439 # older versions
3436 filtered = view._branchcaches
3440 filtered = view._branchcaches
3437
3441
3438 def d():
3442 def d():
3439 if clear_revbranch:
3443 if clear_revbranch:
3440 repo.revbranchcache()._clear()
3444 repo.revbranchcache()._clear()
3441 if full:
3445 if full:
3442 view._branchcaches.clear()
3446 view._branchcaches.clear()
3443 else:
3447 else:
3444 filtered.pop(filtername, None)
3448 filtered.pop(filtername, None)
3445 view.branchmap()
3449 view.branchmap()
3446
3450
3447 return d
3451 return d
3448
3452
3449 # add filter in smaller subset to bigger subset
3453 # add filter in smaller subset to bigger subset
3450 possiblefilters = set(repoview.filtertable)
3454 possiblefilters = set(repoview.filtertable)
3451 if filternames:
3455 if filternames:
3452 possiblefilters &= set(filternames)
3456 possiblefilters &= set(filternames)
3453 subsettable = getbranchmapsubsettable()
3457 subsettable = getbranchmapsubsettable()
3454 allfilters = []
3458 allfilters = []
3455 while possiblefilters:
3459 while possiblefilters:
3456 for name in possiblefilters:
3460 for name in possiblefilters:
3457 subset = subsettable.get(name)
3461 subset = subsettable.get(name)
3458 if subset not in possiblefilters:
3462 if subset not in possiblefilters:
3459 break
3463 break
3460 else:
3464 else:
3461 assert False, b'subset cycle %s!' % possiblefilters
3465 assert False, b'subset cycle %s!' % possiblefilters
3462 allfilters.append(name)
3466 allfilters.append(name)
3463 possiblefilters.remove(name)
3467 possiblefilters.remove(name)
3464
3468
3465 # warm the cache
3469 # warm the cache
3466 if not full:
3470 if not full:
3467 for name in allfilters:
3471 for name in allfilters:
3468 repo.filtered(name).branchmap()
3472 repo.filtered(name).branchmap()
3469 if not filternames or b'unfiltered' in filternames:
3473 if not filternames or b'unfiltered' in filternames:
3470 # add unfiltered
3474 # add unfiltered
3471 allfilters.append(None)
3475 allfilters.append(None)
3472
3476
3473 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3477 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3474 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3478 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3475 branchcacheread.set(classmethod(lambda *args: None))
3479 branchcacheread.set(classmethod(lambda *args: None))
3476 else:
3480 else:
3477 # older versions
3481 # older versions
3478 branchcacheread = safeattrsetter(branchmap, b'read')
3482 branchcacheread = safeattrsetter(branchmap, b'read')
3479 branchcacheread.set(lambda *args: None)
3483 branchcacheread.set(lambda *args: None)
3480 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3484 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3481 branchcachewrite.set(lambda *args: None)
3485 branchcachewrite.set(lambda *args: None)
3482 try:
3486 try:
3483 for name in allfilters:
3487 for name in allfilters:
3484 printname = name
3488 printname = name
3485 if name is None:
3489 if name is None:
3486 printname = b'unfiltered'
3490 printname = b'unfiltered'
3487 timer(getbranchmap(name), title=printname)
3491 timer(getbranchmap(name), title=printname)
3488 finally:
3492 finally:
3489 branchcacheread.restore()
3493 branchcacheread.restore()
3490 branchcachewrite.restore()
3494 branchcachewrite.restore()
3491 fm.end()
3495 fm.end()
3492
3496
3493
3497
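# Editor's note: an illustrative sketch, not part of perf.py. It walks the
# subset chain used by perfbranchmap() above: getbranchmapsubsettable()
# (a helper defined earlier in perf.py) maps each filter name to the next
# smaller subset, which is why the benchmark warms smaller subsets before
# bigger ones. The starting filter name is only an example.
def _sketch_subset_chain(filtername=b'visible'):
    subsettable = getbranchmapsubsettable()
    chain = []
    name = filtername
    while name is not None:
        chain.append(name)
        name = subsettable.get(name)
    # e.g. something like [b'visible', b'served', ...] depending on the
    # Mercurial version; the unfiltered repo sits at the end of the chain.
    return chain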
3494 @command(
3498 @command(
3495 b'perf--branchmapupdate',
3499 b'perf::branchmapupdate|perfbranchmapupdate',
3496 [
3500 [
3497 (b'', b'base', [], b'subset of revision to start from'),
3501 (b'', b'base', [], b'subset of revision to start from'),
3498 (b'', b'target', [], b'subset of revision to end with'),
3502 (b'', b'target', [], b'subset of revision to end with'),
3499 (b'', b'clear-caches', False, b'clear cache between each runs'),
3503 (b'', b'clear-caches', False, b'clear cache between each runs'),
3500 ]
3504 ]
3501 + formatteropts,
3505 + formatteropts,
3502 )
3506 )
3503 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3507 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3504 """benchmark branchmap update from <base> revs to <target> revs
3508 """benchmark branchmap update from <base> revs to <target> revs
3505
3509
3506 If `--clear-caches` is passed, the following items will be reset before
3510 If `--clear-caches` is passed, the following items will be reset before
3507 each update:
3511 each update:
3508 * the changelog instance and associated indexes
3512 * the changelog instance and associated indexes
3509 * the rev-branch-cache instance
3513 * the rev-branch-cache instance
3510
3514
3511 Examples:
3515 Examples:
3512
3516
3513 # update for the one last revision
3517 # update for the one last revision
3514 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3518 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3515
3519
3516 # update for change coming with a new branch
3520 # update for change coming with a new branch
3517 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3521 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3518 """
3522 """
3519 from mercurial import branchmap
3523 from mercurial import branchmap
3520 from mercurial import repoview
3524 from mercurial import repoview
3521
3525
3522 opts = _byteskwargs(opts)
3526 opts = _byteskwargs(opts)
3523 timer, fm = gettimer(ui, opts)
3527 timer, fm = gettimer(ui, opts)
3524 clearcaches = opts[b'clear_caches']
3528 clearcaches = opts[b'clear_caches']
3525 unfi = repo.unfiltered()
3529 unfi = repo.unfiltered()
3526 x = [None] # used to pass data between closure
3530 x = [None] # used to pass data between closure
3527
3531
3528 # we use a `list` here to avoid possible side effect from smartset
3532 # we use a `list` here to avoid possible side effect from smartset
3529 baserevs = list(scmutil.revrange(repo, base))
3533 baserevs = list(scmutil.revrange(repo, base))
3530 targetrevs = list(scmutil.revrange(repo, target))
3534 targetrevs = list(scmutil.revrange(repo, target))
3531 if not baserevs:
3535 if not baserevs:
3532 raise error.Abort(b'no revisions selected for --base')
3536 raise error.Abort(b'no revisions selected for --base')
3533 if not targetrevs:
3537 if not targetrevs:
3534 raise error.Abort(b'no revisions selected for --target')
3538 raise error.Abort(b'no revisions selected for --target')
3535
3539
3536 # make sure the target branchmap also contains the one in the base
3540 # make sure the target branchmap also contains the one in the base
3537 targetrevs = list(set(baserevs) | set(targetrevs))
3541 targetrevs = list(set(baserevs) | set(targetrevs))
3538 targetrevs.sort()
3542 targetrevs.sort()
3539
3543
3540 cl = repo.changelog
3544 cl = repo.changelog
3541 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3545 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3542 allbaserevs.sort()
3546 allbaserevs.sort()
3543 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3547 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3544
3548
3545 newrevs = list(alltargetrevs.difference(allbaserevs))
3549 newrevs = list(alltargetrevs.difference(allbaserevs))
3546 newrevs.sort()
3550 newrevs.sort()
3547
3551
3548 allrevs = frozenset(unfi.changelog.revs())
3552 allrevs = frozenset(unfi.changelog.revs())
3549 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3553 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3550 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3554 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3551
3555
3552 def basefilter(repo, visibilityexceptions=None):
3556 def basefilter(repo, visibilityexceptions=None):
3553 return basefilterrevs
3557 return basefilterrevs
3554
3558
3555 def targetfilter(repo, visibilityexceptions=None):
3559 def targetfilter(repo, visibilityexceptions=None):
3556 return targetfilterrevs
3560 return targetfilterrevs
3557
3561
3558 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3562 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3559 ui.status(msg % (len(allbaserevs), len(newrevs)))
3563 ui.status(msg % (len(allbaserevs), len(newrevs)))
3560 if targetfilterrevs:
3564 if targetfilterrevs:
3561 msg = b'(%d revisions still filtered)\n'
3565 msg = b'(%d revisions still filtered)\n'
3562 ui.status(msg % len(targetfilterrevs))
3566 ui.status(msg % len(targetfilterrevs))
3563
3567
3564 try:
3568 try:
3565 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3569 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3566 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3570 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3567
3571
3568 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3572 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3569 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3573 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3570
3574
3571 # try to find an existing branchmap to reuse
3575 # try to find an existing branchmap to reuse
3572 subsettable = getbranchmapsubsettable()
3576 subsettable = getbranchmapsubsettable()
3573 candidatefilter = subsettable.get(None)
3577 candidatefilter = subsettable.get(None)
3574 while candidatefilter is not None:
3578 while candidatefilter is not None:
3575 candidatebm = repo.filtered(candidatefilter).branchmap()
3579 candidatebm = repo.filtered(candidatefilter).branchmap()
3576 if candidatebm.validfor(baserepo):
3580 if candidatebm.validfor(baserepo):
3577 filtered = repoview.filterrevs(repo, candidatefilter)
3581 filtered = repoview.filterrevs(repo, candidatefilter)
3578 missing = [r for r in allbaserevs if r in filtered]
3582 missing = [r for r in allbaserevs if r in filtered]
3579 base = candidatebm.copy()
3583 base = candidatebm.copy()
3580 base.update(baserepo, missing)
3584 base.update(baserepo, missing)
3581 break
3585 break
3582 candidatefilter = subsettable.get(candidatefilter)
3586 candidatefilter = subsettable.get(candidatefilter)
3583 else:
3587 else:
3584 # no suitable subset where found
3588 # no suitable subset where found
3585 base = branchmap.branchcache()
3589 base = branchmap.branchcache()
3586 base.update(baserepo, allbaserevs)
3590 base.update(baserepo, allbaserevs)
3587
3591
3588 def setup():
3592 def setup():
3589 x[0] = base.copy()
3593 x[0] = base.copy()
3590 if clearcaches:
3594 if clearcaches:
3591 unfi._revbranchcache = None
3595 unfi._revbranchcache = None
3592 clearchangelog(repo)
3596 clearchangelog(repo)
3593
3597
3594 def bench():
3598 def bench():
3595 x[0].update(targetrepo, newrevs)
3599 x[0].update(targetrepo, newrevs)
3596
3600
3597 timer(bench, setup=setup)
3601 timer(bench, setup=setup)
3598 fm.end()
3602 fm.end()
3599 finally:
3603 finally:
3600 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3604 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3601 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3605 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3602
3606
3603
3607
3604 @command(
3608 @command(
3605 b'perf--branchmapload',
3609 b'perf::branchmapload|perfbranchmapload',
3606 [
3610 [
3607 (b'f', b'filter', b'', b'Specify repoview filter'),
3611 (b'f', b'filter', b'', b'Specify repoview filter'),
3608 (b'', b'list', False, b'List branchmap filter caches'),
3612 (b'', b'list', False, b'List branchmap filter caches'),
3609 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3613 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3610 ]
3614 ]
3611 + formatteropts,
3615 + formatteropts,
3612 )
3616 )
3613 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3617 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3614 """benchmark reading the branchmap"""
3618 """benchmark reading the branchmap"""
3615 opts = _byteskwargs(opts)
3619 opts = _byteskwargs(opts)
3616 clearrevlogs = opts[b'clear_revlogs']
3620 clearrevlogs = opts[b'clear_revlogs']
3617
3621
3618 if list:
3622 if list:
3619 for name, kind, st in repo.cachevfs.readdir(stat=True):
3623 for name, kind, st in repo.cachevfs.readdir(stat=True):
3620 if name.startswith(b'branch2'):
3624 if name.startswith(b'branch2'):
3621 filtername = name.partition(b'-')[2] or b'unfiltered'
3625 filtername = name.partition(b'-')[2] or b'unfiltered'
3622 ui.status(
3626 ui.status(
3623 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3627 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3624 )
3628 )
3625 return
3629 return
3626 if not filter:
3630 if not filter:
3627 filter = None
3631 filter = None
3628 subsettable = getbranchmapsubsettable()
3632 subsettable = getbranchmapsubsettable()
3629 if filter is None:
3633 if filter is None:
3630 repo = repo.unfiltered()
3634 repo = repo.unfiltered()
3631 else:
3635 else:
3632 repo = repoview.repoview(repo, filter)
3636 repo = repoview.repoview(repo, filter)
3633
3637
3634 repo.branchmap() # make sure we have a relevant, up to date branchmap
3638 repo.branchmap() # make sure we have a relevant, up to date branchmap
3635
3639
3636 try:
3640 try:
3637 fromfile = branchmap.branchcache.fromfile
3641 fromfile = branchmap.branchcache.fromfile
3638 except AttributeError:
3642 except AttributeError:
3639 # older versions
3643 # older versions
3640 fromfile = branchmap.read
3644 fromfile = branchmap.read
3641
3645
3642 currentfilter = filter
3646 currentfilter = filter
3643 # try once without timer, the filter may not be cached
3647 # try once without timer, the filter may not be cached
3644 while fromfile(repo) is None:
3648 while fromfile(repo) is None:
3645 currentfilter = subsettable.get(currentfilter)
3649 currentfilter = subsettable.get(currentfilter)
3646 if currentfilter is None:
3650 if currentfilter is None:
3647 raise error.Abort(
3651 raise error.Abort(
3648 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3652 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3649 )
3653 )
3650 repo = repo.filtered(currentfilter)
3654 repo = repo.filtered(currentfilter)
3651 timer, fm = gettimer(ui, opts)
3655 timer, fm = gettimer(ui, opts)
3652
3656
3653 def setup():
3657 def setup():
3654 if clearrevlogs:
3658 if clearrevlogs:
3655 clearchangelog(repo)
3659 clearchangelog(repo)
3656
3660
3657 def bench():
3661 def bench():
3658 fromfile(repo)
3662 fromfile(repo)
3659
3663
3660 timer(bench, setup=setup)
3664 timer(bench, setup=setup)
3661 fm.end()
3665 fm.end()
3662
3666
3663
3667
3664 @command(b'perf--loadmarkers')
3668 @command(b'perf::loadmarkers|perfloadmarkers')
3665 def perfloadmarkers(ui, repo):
3669 def perfloadmarkers(ui, repo):
3666 """benchmark the time to parse the on-disk markers for a repo
3670 """benchmark the time to parse the on-disk markers for a repo
3667
3671
3668 Result is the number of markers in the repo."""
3672 Result is the number of markers in the repo."""
3669 timer, fm = gettimer(ui)
3673 timer, fm = gettimer(ui)
3670 svfs = getsvfs(repo)
3674 svfs = getsvfs(repo)
3671 timer(lambda: len(obsolete.obsstore(svfs)))
3675 timer(lambda: len(obsolete.obsstore(svfs)))
3672 fm.end()
3676 fm.end()
3673
3677
3674
3678
3675 @command(
3679 @command(
3676 b'perf--lrucachedict',
3680 b'perf::lrucachedict|perflrucachedict',
3677 formatteropts
3681 formatteropts
3678 + [
3682 + [
3679 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3683 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3680 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3684 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3681 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3685 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3682 (b'', b'size', 4, b'size of cache'),
3686 (b'', b'size', 4, b'size of cache'),
3683 (b'', b'gets', 10000, b'number of key lookups'),
3687 (b'', b'gets', 10000, b'number of key lookups'),
3684 (b'', b'sets', 10000, b'number of key sets'),
3688 (b'', b'sets', 10000, b'number of key sets'),
3685 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3689 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3686 (
3690 (
3687 b'',
3691 b'',
3688 b'mixedgetfreq',
3692 b'mixedgetfreq',
3689 50,
3693 50,
3690 b'frequency of get vs set ops in mixed mode',
3694 b'frequency of get vs set ops in mixed mode',
3691 ),
3695 ),
3692 ],
3696 ],
3693 norepo=True,
3697 norepo=True,
3694 )
3698 )
3695 def perflrucache(
3699 def perflrucache(
3696 ui,
3700 ui,
3697 mincost=0,
3701 mincost=0,
3698 maxcost=100,
3702 maxcost=100,
3699 costlimit=0,
3703 costlimit=0,
3700 size=4,
3704 size=4,
3701 gets=10000,
3705 gets=10000,
3702 sets=10000,
3706 sets=10000,
3703 mixed=10000,
3707 mixed=10000,
3704 mixedgetfreq=50,
3708 mixedgetfreq=50,
3705 **opts
3709 **opts
3706 ):
3710 ):
3707 opts = _byteskwargs(opts)
3711 opts = _byteskwargs(opts)
3708
3712
3709 def doinit():
3713 def doinit():
3710 for i in _xrange(10000):
3714 for i in _xrange(10000):
3711 util.lrucachedict(size)
3715 util.lrucachedict(size)
3712
3716
3713 costrange = list(range(mincost, maxcost + 1))
3717 costrange = list(range(mincost, maxcost + 1))
3714
3718
3715 values = []
3719 values = []
3716 for i in _xrange(size):
3720 for i in _xrange(size):
3717 values.append(random.randint(0, _maxint))
3721 values.append(random.randint(0, _maxint))
3718
3722
3719 # Get mode fills the cache and tests raw lookup performance with no
3723 # Get mode fills the cache and tests raw lookup performance with no
3720 # eviction.
3724 # eviction.
3721 getseq = []
3725 getseq = []
3722 for i in _xrange(gets):
3726 for i in _xrange(gets):
3723 getseq.append(random.choice(values))
3727 getseq.append(random.choice(values))
3724
3728
3725 def dogets():
3729 def dogets():
3726 d = util.lrucachedict(size)
3730 d = util.lrucachedict(size)
3727 for v in values:
3731 for v in values:
3728 d[v] = v
3732 d[v] = v
3729 for key in getseq:
3733 for key in getseq:
3730 value = d[key]
3734 value = d[key]
3731 value # silence pyflakes warning
3735 value # silence pyflakes warning
3732
3736
3733 def dogetscost():
3737 def dogetscost():
3734 d = util.lrucachedict(size, maxcost=costlimit)
3738 d = util.lrucachedict(size, maxcost=costlimit)
3735 for i, v in enumerate(values):
3739 for i, v in enumerate(values):
3736 d.insert(v, v, cost=costs[i])
3740 d.insert(v, v, cost=costs[i])
3737 for key in getseq:
3741 for key in getseq:
3738 try:
3742 try:
3739 value = d[key]
3743 value = d[key]
3740 value # silence pyflakes warning
3744 value # silence pyflakes warning
3741 except KeyError:
3745 except KeyError:
3742 pass
3746 pass
3743
3747
3744 # Set mode tests insertion speed with cache eviction.
3748 # Set mode tests insertion speed with cache eviction.
3745 setseq = []
3749 setseq = []
3746 costs = []
3750 costs = []
3747 for i in _xrange(sets):
3751 for i in _xrange(sets):
3748 setseq.append(random.randint(0, _maxint))
3752 setseq.append(random.randint(0, _maxint))
3749 costs.append(random.choice(costrange))
3753 costs.append(random.choice(costrange))
3750
3754
3751 def doinserts():
3755 def doinserts():
3752 d = util.lrucachedict(size)
3756 d = util.lrucachedict(size)
3753 for v in setseq:
3757 for v in setseq:
3754 d.insert(v, v)
3758 d.insert(v, v)
3755
3759
3756 def doinsertscost():
3760 def doinsertscost():
3757 d = util.lrucachedict(size, maxcost=costlimit)
3761 d = util.lrucachedict(size, maxcost=costlimit)
3758 for i, v in enumerate(setseq):
3762 for i, v in enumerate(setseq):
3759 d.insert(v, v, cost=costs[i])
3763 d.insert(v, v, cost=costs[i])
3760
3764
3761 def dosets():
3765 def dosets():
3762 d = util.lrucachedict(size)
3766 d = util.lrucachedict(size)
3763 for v in setseq:
3767 for v in setseq:
3764 d[v] = v
3768 d[v] = v
3765
3769
3766 # Mixed mode randomly performs gets and sets with eviction.
3770 # Mixed mode randomly performs gets and sets with eviction.
3767 mixedops = []
3771 mixedops = []
3768 for i in _xrange(mixed):
3772 for i in _xrange(mixed):
3769 r = random.randint(0, 100)
3773 r = random.randint(0, 100)
3770 if r < mixedgetfreq:
3774 if r < mixedgetfreq:
3771 op = 0
3775 op = 0
3772 else:
3776 else:
3773 op = 1
3777 op = 1
3774
3778
3775 mixedops.append(
3779 mixedops.append(
3776 (op, random.randint(0, size * 2), random.choice(costrange))
3780 (op, random.randint(0, size * 2), random.choice(costrange))
3777 )
3781 )
3778
3782
3779 def domixed():
3783 def domixed():
3780 d = util.lrucachedict(size)
3784 d = util.lrucachedict(size)
3781
3785
3782 for op, v, cost in mixedops:
3786 for op, v, cost in mixedops:
3783 if op == 0:
3787 if op == 0:
3784 try:
3788 try:
3785 d[v]
3789 d[v]
3786 except KeyError:
3790 except KeyError:
3787 pass
3791 pass
3788 else:
3792 else:
3789 d[v] = v
3793 d[v] = v
3790
3794
3791 def domixedcost():
3795 def domixedcost():
3792 d = util.lrucachedict(size, maxcost=costlimit)
3796 d = util.lrucachedict(size, maxcost=costlimit)
3793
3797
3794 for op, v, cost in mixedops:
3798 for op, v, cost in mixedops:
3795 if op == 0:
3799 if op == 0:
3796 try:
3800 try:
3797 d[v]
3801 d[v]
3798 except KeyError:
3802 except KeyError:
3799 pass
3803 pass
3800 else:
3804 else:
3801 d.insert(v, v, cost=cost)
3805 d.insert(v, v, cost=cost)
3802
3806
3803 benches = [
3807 benches = [
3804 (doinit, b'init'),
3808 (doinit, b'init'),
3805 ]
3809 ]
3806
3810
3807 if costlimit:
3811 if costlimit:
3808 benches.extend(
3812 benches.extend(
3809 [
3813 [
3810 (dogetscost, b'gets w/ cost limit'),
3814 (dogetscost, b'gets w/ cost limit'),
3811 (doinsertscost, b'inserts w/ cost limit'),
3815 (doinsertscost, b'inserts w/ cost limit'),
3812 (domixedcost, b'mixed w/ cost limit'),
3816 (domixedcost, b'mixed w/ cost limit'),
3813 ]
3817 ]
3814 )
3818 )
3815 else:
3819 else:
3816 benches.extend(
3820 benches.extend(
3817 [
3821 [
3818 (dogets, b'gets'),
3822 (dogets, b'gets'),
3819 (doinserts, b'inserts'),
3823 (doinserts, b'inserts'),
3820 (dosets, b'sets'),
3824 (dosets, b'sets'),
3821 (domixed, b'mixed'),
3825 (domixed, b'mixed'),
3822 ]
3826 ]
3823 )
3827 )
3824
3828
3825 for fn, title in benches:
3829 for fn, title in benches:
3826 timer, fm = gettimer(ui, opts)
3830 timer, fm = gettimer(ui, opts)
3827 timer(fn, title=title)
3831 timer(fn, title=title)
3828 fm.end()
3832 fm.end()
3829
3833
3830
3834
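# Editor's note: an illustrative sketch, not part of perf.py. It demonstrates
# the two util.lrucachedict APIs exercised by the benchmarks above: plain
# dict-style assignment with size-based eviction, and cost-aware insertion
# when a total cost limit is configured.
def _sketch_lrucachedict():
    from mercurial import util

    d = util.lrucachedict(4)               # keeps the 4 most recently used keys
    for i in range(10):
        d[i] = i                           # older entries are evicted as new ones land
    try:
        d[0]                               # long-evicted key
        survived = True
    except KeyError:
        survived = False                   # expected: 0 was pushed out by later inserts

    c = util.lrucachedict(4, maxcost=100)
    c.insert(b'big', b'blob', cost=80)     # cost-aware insertion; eviction also keeps
    c.insert(b'small', b'blob', cost=30)   # the summed cost under maxcost
    return survived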
3831 @command(
3835 @command(
3832 b'perf--write',
3836 b'perf::write|perfwrite',
3833 formatteropts
3837 formatteropts
3834 + [
3838 + [
3835 (b'', b'write-method', b'write', b'ui write method'),
3839 (b'', b'write-method', b'write', b'ui write method'),
3836 (b'', b'nlines', 100, b'number of lines'),
3840 (b'', b'nlines', 100, b'number of lines'),
3837 (b'', b'nitems', 100, b'number of items (per line)'),
3841 (b'', b'nitems', 100, b'number of items (per line)'),
3838 (b'', b'item', b'x', b'item that is written'),
3842 (b'', b'item', b'x', b'item that is written'),
3839 (b'', b'batch-line', None, b'pass whole line to write method at once'),
3843 (b'', b'batch-line', None, b'pass whole line to write method at once'),
3840 (b'', b'flush-line', None, b'flush after each line'),
3844 (b'', b'flush-line', None, b'flush after each line'),
3841 ],
3845 ],
3842 )
3846 )
3843 def perfwrite(ui, repo, **opts):
3847 def perfwrite(ui, repo, **opts):
3844 """microbenchmark ui.write (and others)"""
3848 """microbenchmark ui.write (and others)"""
3845 opts = _byteskwargs(opts)
3849 opts = _byteskwargs(opts)
3846
3850
3847 write = getattr(ui, _sysstr(opts[b'write_method']))
3851 write = getattr(ui, _sysstr(opts[b'write_method']))
3848 nlines = int(opts[b'nlines'])
3852 nlines = int(opts[b'nlines'])
3849 nitems = int(opts[b'nitems'])
3853 nitems = int(opts[b'nitems'])
3850 item = opts[b'item']
3854 item = opts[b'item']
3851 batch_line = opts.get(b'batch_line')
3855 batch_line = opts.get(b'batch_line')
3852 flush_line = opts.get(b'flush_line')
3856 flush_line = opts.get(b'flush_line')
3853
3857
3854 if batch_line:
3858 if batch_line:
3855 line = item * nitems + b'\n'
3859 line = item * nitems + b'\n'
3856
3860
3857 def benchmark():
3861 def benchmark():
3858 for i in pycompat.xrange(nlines):
3862 for i in pycompat.xrange(nlines):
3859 if batch_line:
3863 if batch_line:
3860 write(line)
3864 write(line)
3861 else:
3865 else:
3862 for i in pycompat.xrange(nitems):
3866 for i in pycompat.xrange(nitems):
3863 write(item)
3867 write(item)
3864 write(b'\n')
3868 write(b'\n')
3865 if flush_line:
3869 if flush_line:
3866 ui.flush()
3870 ui.flush()
3867 ui.flush()
3871 ui.flush()
3868
3872
3869 timer, fm = gettimer(ui, opts)
3873 timer, fm = gettimer(ui, opts)
3870 timer(benchmark)
3874 timer(benchmark)
3871 fm.end()
3875 fm.end()
3872
3876
3873
3877
3874 def uisetup(ui):
3878 def uisetup(ui):
3875 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3879 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3876 commands, b'debugrevlogopts'
3880 commands, b'debugrevlogopts'
3877 ):
3881 ):
3878 # for "historical portability":
3882 # for "historical portability":
3879 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3883 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3880 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3884 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3881 # openrevlog() should cause failure, because it has been
3885 # openrevlog() should cause failure, because it has been
3882 # available since 3.5 (or 49c583ca48c4).
3886 # available since 3.5 (or 49c583ca48c4).
3883 def openrevlog(orig, repo, cmd, file_, opts):
3887 def openrevlog(orig, repo, cmd, file_, opts):
3884 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3888 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3885 raise error.Abort(
3889 raise error.Abort(
3886 b"This version doesn't support --dir option",
3890 b"This version doesn't support --dir option",
3887 hint=b"use 3.5 or later",
3891 hint=b"use 3.5 or later",
3888 )
3892 )
3889 return orig(repo, cmd, file_, opts)
3893 return orig(repo, cmd, file_, opts)
3890
3894
3891 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3895 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3892
3896
3893
3897
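# Editor's note: an illustrative sketch, not part of perf.py. It restates the
# "historical portability" pattern used by the openrevlog wrapper above and by
# several commands in this file: probe for the modern attribute first and fall
# back to the older spelling, so the extension keeps loading against old
# Mercurial releases.
def _sketch_pick_segment_reader(rl):
    # _chunkraw was renamed to _getsegmentforrevs, exactly as handled in
    # perfrevlogchunks() and perfrevlogrevision() above.
    try:
        return rl._getsegmentforrevs
    except AttributeError:
        return rl._chunkraw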
3894 @command(
3898 @command(
3895 b'perf--progress',
3899 b'perf::progress|perfprogress',
3896 formatteropts
3900 formatteropts
3897 + [
3901 + [
3898 (b'', b'topic', b'topic', b'topic for progress messages'),
3902 (b'', b'topic', b'topic', b'topic for progress messages'),
3899 (b'c', b'total', 1000000, b'total value we are progressing to'),
3903 (b'c', b'total', 1000000, b'total value we are progressing to'),
3900 ],
3904 ],
3901 norepo=True,
3905 norepo=True,
3902 )
3906 )
3903 def perfprogress(ui, topic=None, total=None, **opts):
3907 def perfprogress(ui, topic=None, total=None, **opts):
3904 """printing of progress bars"""
3908 """printing of progress bars"""
3905 opts = _byteskwargs(opts)
3909 opts = _byteskwargs(opts)
3906
3910
3907 timer, fm = gettimer(ui, opts)
3911 timer, fm = gettimer(ui, opts)
3908
3912
3909 def doprogress():
3913 def doprogress():
3910 with ui.makeprogress(topic, total=total) as progress:
3914 with ui.makeprogress(topic, total=total) as progress:
3911 for i in _xrange(total):
3915 for i in _xrange(total):
3912 progress.increment()
3916 progress.increment()
3913
3917
3914 timer(doprogress)
3918 timer(doprogress)
3915 fm.end()
3919 fm.end()
@@ -1,425 +1,425
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perf--addremove
81 perf::addremove
82 (no help text available)
82 (no help text available)
83 perf--ancestors
83 perf::ancestors
84 (no help text available)
84 (no help text available)
85 perf--ancestorset
85 perf::ancestorset
86 (no help text available)
86 (no help text available)
87 perf--annotate
87 perf::annotate
88 (no help text available)
88 (no help text available)
89 perf--bdiff benchmark a bdiff between revisions
89 perf::bdiff benchmark a bdiff between revisions
90 perf--bookmarks
90 perf::bookmarks
91 benchmark parsing bookmarks from disk to memory
91 benchmark parsing bookmarks from disk to memory
92 perf--branchmap
92 perf::branchmap
93 benchmark the update of a branchmap
93 benchmark the update of a branchmap
94 perf--branchmapload
94 perf::branchmapload
95 benchmark reading the branchmap
95 benchmark reading the branchmap
96 perf--branchmapupdate
96 perf::branchmapupdate
97 benchmark branchmap update from for <base> revs to <target>
97 benchmark branchmap update from for <base> revs to <target>
98 revs
98 revs
99 perf--bundleread
99 perf::bundleread
100 Benchmark reading of bundle files.
100 Benchmark reading of bundle files.
101 perf--cca (no help text available)
101 perf::cca (no help text available)
102 perf--changegroupchangelog
102 perf::changegroupchangelog
103 Benchmark producing a changelog group for a changegroup.
103 Benchmark producing a changelog group for a changegroup.
104 perf--changeset
104 perf::changeset
105 (no help text available)
105 (no help text available)
106 perf--ctxfiles
106 perf::ctxfiles
107 (no help text available)
107 (no help text available)
108 perf--diffwd Profile diff of working directory changes
108 perf::diffwd Profile diff of working directory changes
109 perf--dirfoldmap
109 perf::dirfoldmap
110 benchmap a 'dirstate._map.dirfoldmap.get()' request
110 benchmap a 'dirstate._map.dirfoldmap.get()' request
111 perf--dirs (no help text available)
111 perf::dirs (no help text available)
112 perf--dirstate
112 perf::dirstate
113 benchmap the time of various distate operations
113 benchmap the time of various distate operations
114 perf--dirstatedirs
114 perf::dirstatedirs
115 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
115 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
116 perf--dirstatefoldmap
116 perf::dirstatefoldmap
117 benchmap a 'dirstate._map.filefoldmap.get()' request
118 perf::dirstatewrite
119 benchmap the time it take to write a dirstate on disk
120 perf::discovery
121 benchmark discovery between local repo and the peer at given
122 path
123 perf::fncacheencode
124 (no help text available)
125 perf::fncacheload
126 (no help text available)
127 perf::fncachewrite
128 (no help text available)
129 perf::heads benchmark the computation of a changelog heads
130 perf::helper-mergecopies
131 find statistics about potential parameters for
132 'perfmergecopies'
133 perf::helper-pathcopies
134 find statistic about potential parameters for the
135 'perftracecopies'
136 perf::ignore benchmark operation related to computing ignore
137 perf::index benchmark index creation time followed by a lookup
138 perf::linelogedits
139 (no help text available)
140 perf::loadmarkers
141 benchmark the time to parse the on-disk markers for a repo
142 perf::log (no help text available)
143 perf::lookup (no help text available)
144 perf::lrucachedict
145 (no help text available)
146 perf::manifest
147 benchmark the time to read a manifest from disk and return a
148 usable
149 perf::mergecalculate
150 (no help text available)
151 perf::mergecopies
152 measure runtime of 'copies.mergecopies'
153 perf::moonwalk
154 benchmark walking the changelog backwards
155 perf::nodelookup
156 (no help text available)
157 perf::nodemap
158 benchmark the time necessary to look up revision from a cold
159 nodemap
160 perf::parents
161 benchmark the time necessary to fetch one changeset's parents.
162 perf::pathcopies
163 benchmark the copy tracing logic
164 perf::phases benchmark phasesets computation
165 perf::phasesremote
166 benchmark time needed to analyse phases of the remote server
167 perf::progress
168 printing of progress bars
169 perf::rawfiles
170 (no help text available)
171 perf::revlogchunks
172 Benchmark operations on revlog chunks.
173 perf::revlogindex
174 Benchmark operations against a revlog index.
175 perf::revlogrevision
176 Benchmark obtaining a revlog revision.
177 perf::revlogrevisions
178 Benchmark reading a series of revisions from a revlog.
179 perf::revlogwrite
180 Benchmark writing a series of revisions to a revlog.
181 perf::revrange
182 (no help text available)
183 perf::revset benchmark the execution time of a revset
184 perf::startup
185 (no help text available)
186 perf::status benchmark the performance of a single status call
187 perf::tags (no help text available)
188 perf::templating
189 test the rendering time of a given template
190 perf::unidiff
191 benchmark a unified diff between revisions
192 perf::volatilesets
193 benchmark the computation of various volatile set
194 perf::walk (no help text available)
195 perf::write microbenchmark ui.write (and others)
196
197 (use 'hg help -v perf' to show built-in aliases and global options)
198
199 $ hg help perfaddremove
200 hg perf::addremove
201
202 aliases: perfaddremove
203
204 (no help text available)
205
206 options:
207
208 -T --template TEMPLATE display with template
209
210 (some details hidden, use --verbose to show complete help)
211
212 $ hg perfaddremove
213 $ hg perfancestors
214 $ hg perfancestorset 2
215 $ hg perfannotate a
216 $ hg perfbdiff -c 1
217 $ hg perfbdiff --alldata 1
218 $ hg perfunidiff -c 1
219 $ hg perfunidiff --alldata 1
220 $ hg perfbookmarks
221 $ hg perfbranchmap
222 $ hg perfbranchmapload
223 $ hg perfbranchmapupdate --base "not tip" --target "tip"
224 benchmark of branchmap with 3 revisions with 1 new ones
225 $ hg perfcca
226 $ hg perfchangegroupchangelog
227 $ hg perfchangegroupchangelog --cgversion 01
228 $ hg perfchangeset 2
229 $ hg perfctxfiles 2
230 $ hg perfdiffwd
231 $ hg perfdirfoldmap
232 $ hg perfdirs
233 $ hg perfdirstate
234 $ hg perfdirstate --contains
235 $ hg perfdirstate --iteration
236 $ hg perfdirstatedirs
237 $ hg perfdirstatefoldmap
238 $ hg perfdirstatewrite
239 #if repofncache
240 $ hg perffncacheencode
241 $ hg perffncacheload
242 $ hg debugrebuildfncache
243 fncache already up to date
244 $ hg perffncachewrite
245 $ hg debugrebuildfncache
246 fncache already up to date
247 #endif
248 $ hg perfheads
249 $ hg perfignore
250 $ hg perfindex
251 $ hg perflinelogedits -n 1
252 $ hg perfloadmarkers
253 $ hg perflog
254 $ hg perflookup 2
255 $ hg perflrucache
256 $ hg perfmanifest 2
257 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
258 $ hg perfmanifest -m 44fe2c8352bb
259 abort: manifest revision must be integer or full node
260 [255]
261 $ hg perfmergecalculate -r 3
262 $ hg perfmoonwalk
263 $ hg perfnodelookup 2
264 $ hg perfpathcopies 1 2
265 $ hg perfprogress --total 1000
266 $ hg perfrawfiles 2
267 $ hg perfrevlogindex -c
268 #if reporevlogstore
269 $ hg perfrevlogrevisions .hg/store/data/a.i
270 #endif
271 $ hg perfrevlogrevision -m 0
272 $ hg perfrevlogchunks -c
273 $ hg perfrevrange
274 $ hg perfrevset 'all()'
275 $ hg perfstartup
276 $ hg perfstatus
277 $ hg perfstatus --dirstate
278 $ hg perftags
279 $ hg perftemplating
280 $ hg perfvolatilesets
281 $ hg perfwalk
282 $ hg perfparents
283 $ hg perfdiscovery -q .
284
285 Test run control
286 ----------------
287
288 Simple single entry
289
290 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
291 ! wall * comb * user * sys * (best of 15) (glob)
292
293 Multiple entries
294
295 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
296 ! wall * comb * user * sys * (best of 5) (glob)
297
298 error case are ignored
299
300 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
301 malformatted run limit entry, missing "-": 500
302 ! wall * comb * user * sys * (best of 5) (glob)
303 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
304 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
305 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
306 ! wall * comb * user * sys * (best of 5) (glob)
307 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
308 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
309 ! wall * comb * user * sys * (best of 5) (glob)
310
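The errors above come from parsing the perf.run-limits value, a comma-separated list of <seconds>-<count> entries: the first field is read as a float (a time budget) and the second as an int (an iteration count), and malformed entries are reported and skipped. The Python sketch below only illustrates that parsing logic; it is not the extension's actual code, and the helper name parse_run_limits is made up.

    def parse_run_limits(value):
        # value looks like "3.0-100, 10.0-3": time budget in seconds,
        # then the number of iterations allowed within that budget.
        limits = []
        for entry in value.split(','):
            entry = entry.strip()
            if '-' not in entry:
                print('malformatted run limit entry, missing "-": %s' % entry)
                continue
            time_part, count_part = entry.split('-', 1)
            try:
                seconds = float(time_part)
                count = int(count_part)
            except ValueError as exc:
                # a bad float ("aaa-12") or a bad int ("12-aaaaaa") is skipped
                print('malformatted run limit entry, %s: %s' % (exc, entry))
                continue
            limits.append((seconds, count))
        return limits

    # parse_run_limits('500, 0.000000001-5') keeps only (1e-09, 5),
    # matching the behaviour exercised by the transcript above.
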
311 test actual output
312 ------------------
313
314 normal output:
315
316 $ hg perfheads --config perf.stub=no
317 ! wall * comb * user * sys * (best of *) (glob)
318
319 detailed output:
320
321 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
322 ! wall * comb * user * sys * (best of *) (glob)
323 ! wall * comb * user * sys * (max of *) (glob)
324 ! wall * comb * user * sys * (avg of *) (glob)
325 ! wall * comb * user * sys * (median of *) (glob)
326
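With perf.all-timing=yes the detailed output above reports best, max, avg and median timings. A minimal, self-contained sketch of how such aggregates can be computed from a list of wall-clock samples; it is illustrative only and the sample values in the comment are invented.

    def timing_stats(wall_samples):
        # Derive the same aggregates the detailed output reports.
        ordered = sorted(wall_samples)
        n = len(ordered)
        best, worst = ordered[0], ordered[-1]
        avg = sum(ordered) / n
        # median: middle value, or mean of the two middle values for even n
        if n % 2:
            median = ordered[n // 2]
        else:
            median = (ordered[n // 2 - 1] + ordered[n // 2]) / 2
        return {'best': best, 'max': worst, 'avg': avg, 'median': median}

    # timing_stats([0.012, 0.009, 0.011])
    # -> best=0.009, max=0.012, avg~0.0107, median=0.011
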
327 test json output
328 ----------------
329
330 normal output:
331
332 $ hg perfheads --template json --config perf.stub=no
333 [
334 {
335 "comb": *, (glob)
336 "count": *, (glob)
337 "sys": *, (glob)
338 "user": *, (glob)
339 "wall": * (glob)
340 }
341 ]
342
343 detailed output:
344
345 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
346 [
347 {
348 "avg.comb": *, (glob)
349 "avg.count": *, (glob)
350 "avg.sys": *, (glob)
351 "avg.user": *, (glob)
352 "avg.wall": *, (glob)
353 "comb": *, (glob)
354 "count": *, (glob)
355 "max.comb": *, (glob)
356 "max.count": *, (glob)
357 "max.sys": *, (glob)
358 "max.user": *, (glob)
359 "max.wall": *, (glob)
360 "median.comb": *, (glob)
361 "median.count": *, (glob)
362 "median.sys": *, (glob)
363 "median.user": *, (glob)
364 "median.wall": *, (glob)
365 "sys": *, (glob)
366 "user": *, (glob)
367 "wall": * (glob)
368 }
369 ]
370
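Because --template json emits a machine-readable list of measurements, the output is easy to post-process. A hedged example, assuming an hg executable with the perf extension enabled is on PATH and the script is run inside a repository:

    import json
    import subprocess

    # Run the same command as in the transcript and load its JSON output;
    # perf.stub=no makes it perform real timed runs rather than a stub run.
    out = subprocess.check_output(
        ['hg', 'perfheads', '--template', 'json', '--config', 'perf.stub=no']
    )
    for entry in json.loads(out):
        print('wall=%s comb=%s count=%s'
              % (entry['wall'], entry['comb'], entry['count']))
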
371 Test pre-run feature
372 --------------------
373
374 (perf discovery has some spurious output)
375
376 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
377 ! wall * comb * user * sys * (best of 1) (glob)
378 searching for changes
379 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
380 ! wall * comb * user * sys * (best of 1) (glob)
381 searching for changes
382 searching for changes
383 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
384 ! wall * comb * user * sys * (best of 1) (glob)
385 searching for changes
386 searching for changes
387 searching for changes
388 searching for changes
389
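The extra "searching for changes" lines above correspond to the perf.pre-run warm-up iterations executed before the timed run. A generic warm-up pattern is sketched below as an illustration only, not as the extension's implementation:

    import time

    def timed_run(func, pre_run=0):
        # Warm-up iterations are executed but not measured, so their side
        # effects (like the "searching for changes" output) still appear.
        for _ in range(pre_run):
            func()
        start = time.perf_counter()
        func()
        return time.perf_counter() - start
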
390 test profile-benchmark option
391 ------------------------------
392
393 Function to check that statprof ran
394 $ statprofran () {
395 > egrep 'Sample count:|No samples recorded' > /dev/null
396 > }
397 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
398
399 Check perf.py for historical portability
400 ----------------------------------------
401
402 $ cd "$TESTDIR/.."
403
404 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
405 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
406 > "$TESTDIR"/check-perf-code.py contrib/perf.py
407 contrib/perf.py:\d+: (re)
408 > from mercurial import (
409 import newer module separately in try clause for early Mercurial
410 contrib/perf.py:\d+: (re)
411 > from mercurial import (
412 import newer module separately in try clause for early Mercurial
413 contrib/perf.py:\d+: (re)
414 > origindexpath = orig.opener.join(orig.indexfile)
415 use getvfs()/getsvfs() for early Mercurial
416 contrib/perf.py:\d+: (re)
417 > origdatapath = orig.opener.join(orig.datafile)
418 use getvfs()/getsvfs() for early Mercurial
419 contrib/perf.py:\d+: (re)
420 > vfs = vfsmod.vfs(tmpdir)
421 use getvfs()/getsvfs() for early Mercurial
422 contrib/perf.py:\d+: (re)
423 > vfs.options = getattr(orig.opener, 'options', None)
424 use getvfs()/getsvfs() for early Mercurial
425 [1]
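The checker output above asks for newer modules to be imported separately in a try clause so that perf.py stays loadable on early Mercurial. A hedged sketch of that pattern follows; the specific module names are examples only, not a statement about which modules perf.py actually guards:

    # Modules available in every supported Mercurial can be imported directly.
    from mercurial import (
        cmdutil,
        util,
    )

    # Newer modules are imported separately inside a try clause, so merely
    # loading the extension does not fail on an old Mercurial lacking them.
    try:
        from mercurial import profiling
    except ImportError:
        profiling = None
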