##// END OF EJS Templates
perf-discovery: use `get_unique_pull_path`...
marmoute -
r47735:92029a43 default
parent child Browse files
Show More
@@ -1,3919 +1,3925 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
  number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
  number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return the argument unchanged.

    Used as a no-op stand-in for pycompat conversion helpers (e.g.
    ``byteskwargs``/``fsencode``) on Mercurial versions that lack them.
    """
    return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)

# Unique sentinel object: cannot be confused with any real attribute value
# (unlike e.g. None, which an attribute could legitimately hold).
_undefined = object()


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (attr given as bytes).

    ``_sysstr`` converts the bytes attribute name to a native str for
    getattr; the sentinel distinguishes "missing" from falsy values.
    """
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


# Overwrite util.safehasattr so the rest of this extension can rely on it
# even on Mercurial versions predating it.
setattr(util, 'safehasattr', safehasattr)
182
182
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # Python 3.3+: monotonic, highest-resolution clock available.
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): on Python 3 os.name is the str 'nt', so this bytes
    # comparison only matches on Python 2 — where time.clock is the
    # high-resolution clock on Windows. On Python 3 the perf_counter
    # branch above always applies, so this branch is effectively py2-only.
    util.timer = time.clock
else:
    util.timer = time.time
192
192
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# Command table populated by the @command decorator defined below.
cmdtable = {}
222
222
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec such as ``b"commit|ci"`` into its alias list."""
    aliases = cmd.split(b"|")
    return aliases
228
228
229
229
# Pick the best available @command decorator, probing newest API first.
if safehasattr(registrar, 'command'):
    # 3.7+: registrar-based command registration.
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    # 1.9 - 3.6: cmdutil-based registration.
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # Emulate norepo= by appending aliases to commands.norepo,
            # the mechanism older Mercurial used for repo-less commands.
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # cmdtable entries are (func, options[, synopsis]) tuples.
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
# Register the perf.* config items when registrar/configitems exist;
# on older Mercurial the imports or attributes fail and registration is
# simply skipped (ImportError/AttributeError branch below).
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (configitem() rejected the experimental= keyword there; re-register
    # every item without it)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
349
349
350
350
def getlen(ui):
    """Return a length function.

    When the experimental ``perf.stub`` config knob is set (used for
    testing, per the module docstring), every collection is reported as
    length 1; otherwise the builtin ``len`` is returned unchanged.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    if stubbed:
        return lambda x: 1
    return len
355
355
356
356
class noop(object):
    """Context manager that does nothing on entry or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None


# Shared do-nothing context instance, used when no profiler is active.
NOOPCTX = noop()
368
368
369
369
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # Falsy, mirroring plainformatter: callers test "if fm"
                # to decide whether structured output is in effect.
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # Each entry is b'<seconds>-<minruns>'; malformed entries are warned
    # about and skipped rather than aborting the benchmark.
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # (only the first timed iteration is profiled; see _timer)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
492
492
493
493
def stub_timer(fm, func, setup=None, title=None):
    """Degenerate timer used when ``perf.stub`` is set: run once, time nothing.

    ``fm`` and ``title`` are accepted only for signature compatibility
    with ``_timer`` and are not used.
    """
    if setup is not None:
        setup()
    func()
498
498
499
499
@contextlib.contextmanager
def timeone():
    """Time the enclosed block; yield a list that receives one sample.

    On exit the yielded list contains a single tuple:
    (wall-clock seconds, user CPU delta, system CPU delta).
    """
    r = []
    # os.times() is sampled outside the util.timer() pair so CPU-time
    # bookkeeping does not sit inside the wall-clock measurement.
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() fields: [0] = user time, [1] = system time.
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
510
510
511
511
# list of stop condition (elapsed time, minimal run count)
# Evaluated in order: stop after 3s once 100 runs are done, otherwise
# after 10s once at least 3 runs are done (see _timer).
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
517
517
518
518
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark ``func`` repeatedly and report results through ``fm``.

    ``setup`` (if given) runs before every invocation but outside the
    timed region. ``limits`` is a sequence of (elapsed-seconds, min-runs)
    stop conditions. Only the first timed iteration runs under
    ``profiler``; afterwards it is replaced with the no-op context.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # Warm-up iterations: executed but never recorded.
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # Profile only the first measured run; subsequent runs are free.
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # ``r`` holds the return value of the last func() call.
    formatone(fm, results, title=title, result=r, displayall=displayall)
558
558
559
559
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Render one benchmark's timing samples through formatter ``fm``.

    ``timings`` is a list of (wall, user, sys) tuples; it is sorted in
    place. The best sample is always shown; with ``displayall`` the max,
    average and median are shown as well.
    """
    nsamples = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _emit(role, entry):
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, nsamples)
        fm.plain(b'\n')

    timings.sort()
    _emit(b'best', timings[0])
    if displayall:
        _emit(b'max', timings[-1])
        averages = tuple([sum(column) / nsamples for column in zip(*timings)])
        _emit(b'avg', averages)
        _emit(b'median', timings[len(timings) // 2])
593
593
594
594
595 # utilities for historical portability
595 # utilities for historical portability
596
596
597
597
def getint(ui, section, name, default):
    """Integer config lookup compatible with pre-1.9 Mercurial.

    ui.configint has only been available since 1.9 (or fa2b596db182),
    so read the raw config value and convert it here.

    Raises error.ConfigError when the value is set but not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
610
610
611
611
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    # `name` is a bytes string throughout this module; _sysstr converts it
    # to a native str for getattr/setattr below.
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # snapshot the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # small handle object closing over (obj, name, origvalue)
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
648
648
649
649
650 # utilities to examine each internal API changes
650 # utilities to examine each internal API changes
651
651
652
652
def getbranchmapsubsettable():
    """Locate the branchmap `subsettable` mapping across hg versions."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
671
671
672
672
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
682
682
683
683
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
693
693
694
694
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # older attribute layouts, newest first:
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
723
723
724
724
725 # utilities to clear cache
725 # utilities to clear cache
726
726
727
727
def clearfilecache(obj, attrname):
    """Drop *attrname* from obj's filecache so it is recomputed on access."""
    # operate on the unfiltered repo when the object supports filtering
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # forget the filecache bookkeeping entry as well (missing key is fine)
    obj._filecache.pop(attrname, None)
735
735
736
736
def clearchangelog(repo):
    """Force the changelog to be reloaded from disk on next access."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # a filtered view keeps its own (cache, key) pair; reset both
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
742
742
743
743
744 # perf commands
744 # perf commands
745
745
746
746
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark a dirstate walk over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def run():
        # materialize the whole walk; the traversal itself is the cost
        walk = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walk))

    timer(run)
    fm.end()
760
760
761
761
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory's parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    filectx = repo[b'.'][f]
    timer(lambda: len(filectx.annotate(True)))
    fm.end()
769
769
770
770
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']
    if opts[b'dirstate']:
        # bypass repo.status and hit the dirstate API directly
        dirstate = repo.dirstate
        matcher = scmutil.matchall(repo)

        def status_dirstate():
            st = dirstate.status(
                matcher,
                subrepos=[],
                ignored=False,
                clean=False,
                unknown=unknown,
            )
            # consume the result so lazy work is included in the timing
            sum(map(bool, st))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=unknown))))
    fm.end()
807
807
808
808
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Save the quiet flag *before* entering the try block: the original
    # assigned `oldquiet` inside the try, so a failure on that very line
    # would make the finally clause raise a NameError that masks the
    # real error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            # for "historical portability": signature without uipathfn
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
826
826
827
827
def clearcaches(cl):
    """Clear lookup caches on a changelog/revlog, across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        # reset the node->rev cache to its pristine single-entry state
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
838
838
839
839
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def setup():
        # drop revlog caches so every run recomputes from scratch
        clearcaches(changelog)

    def run():
        len(changelog.headrevs())

    timer(run, setup=setup)
    fm.end()
855
855
856
856
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            # also reload changelog and manifest from disk each run
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        cleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
881
881
882
882
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        # exhaust the iterator; the traversal itself is the measured work
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
895
895
896
896
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        # build the lazy ancestor set, then probe it with every revision
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(run)
    fm.end()
911
911
912
912
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # repos[1] is refilled by the setup callback with a fresh peer, so each
    # timed run performs a complete discovery round-trip.
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path

        # modern API: resolve the destination the same way `hg pull` does,
        # honoring [paths] aliases and sub-options.
        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        # for "historical portability": urlutil does not exist on older
        # Mercurial versions; fall back to the legacy helper.
        path = ui.expandpath(path)

    def s():
        # setup (untimed): open a fresh peer connection
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
928
934
929
935
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses it
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
954
960
955
961
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # wrap `fn` so every run re-opens the file and re-parses the
        # bundle header before exercising the payload
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the bundle payload in `size`-byte chunks until exhausted
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # read each bundle2 part payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # format-independent baselines are always benchmarked
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once to sniff the bundle type and pick format-specific benches
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1080
1086
1081
1087
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # generate and discard every chunk; the generation is the cost
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1117
1123
1118
1124
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so its load cost is excluded from the timing
    b'a' in dirstate

    def run():
        dirstate.hasdir(b'a')
        # drop the cached directory map so the next run rebuilds it
        del dirstate._map._dirs

    timer(run)
    fm.end()
1132
1138
1133
1139
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before any measurement
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # measure a full iteration over every tracked file
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # measure many membership tests, half hits and half misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: measure loading the dirstate from scratch

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1196
1202
1197
1203
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate once so only the `_dirs` rebuild is measured below
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory map so each run rebuilds it from scratch
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1214
1220
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate itself so only the filefoldmap rebuild is measured
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached property so each run recomputes it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1234
1240
1235
1241
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate itself so only the cache rebuild is measured
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both caches: dirfoldmap is derived from the `_dirs` map
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1256
1262
1257
1263
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmap the time it take to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate so loading time is excluded from the measurement
    b"a" in ds

    def setup():
        # force a write even though nothing changed between runs
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1274
1280
1275
1281
def _getmergerevs(repo, opts):
    """parse command arguments to return revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: fall back to the common ancestor of both sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1297
1303
1298
1304
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the runtime of `merge.calculateupdates` for a merge"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1330
1336
1331
1337
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1354
1360
1355
1361
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1369
1375
1370
1376
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # also drop the on-disk file cache so reading it is timed too
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1395
1401
1396
1402
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    # the peer is no longer needed once the listkeys data is fetched
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # older index API without has_node: fall back to the nodemap
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE: `.iteritems()` is kept on purpose; this file preserves py2 compat
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1455
1461
1456
1462
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; derive its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node of the manifest itself
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older manifestlog API
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1500
1506
1501
1507
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1514
1520
1515
1521
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop both the dirstate content and the cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # property access triggers (re)loading of the ignore matcher
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1532
1538
1533
1539
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1596
1602
1597
1603
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can vary. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            # older index API without get_rev
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1668
1674
1669
1675
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of the current executable (`version -q`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            # blank HGRCPATH so user configuration does not skew the timing
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1686
1692
1687
1693
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1713
1719
1714
1720
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of revision `x` via its changectx"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()
1726
1732
1727
1733
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from the changelog entry"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        # field 3 of the raw changelog entry — presumably the file list;
        # NOTE(review): confirm against the changelog.read() API
        len(cl.read(x)[3])

    timer(d)
    fm.end()
1740
1746
1741
1747
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier via `repo.lookup`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # len() forces the lookup result to be realized inside the timed call
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1748
1754
1749
1755
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a deterministic pseudo-random edit stream to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the generated edit stream is identical between runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        # track the running line count so hunk bounds stay valid
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1787
1793
1788
1794
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind once so the timed closure does a fast local lookup
    resolve = scmutil.revrange

    def run():
        return len(resolve(repo, specs))

    timer(run)
    fm.end()
1796
1802
1797
1803
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node -> rev lookup in the changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def lookup():
        cl.rev(node)
        # drop caches so every timed iteration starts cold
        clearcaches(cl)

    timer(lookup)
    fm.end()
1814
1820
1815
1821
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark the `hg log` command, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # capture command output so terminal I/O does not pollute the timing
    ui.pushbuffer()

    def runlog():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(runlog)
    ui.popbuffer()
    fm.end()
1833
1839
1834
1840
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkback():
        last = len(repo) - 1
        for r in repo.changelog.revs(start=last, stop=-1):
            ctx = repo[r]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(walkback)
    fm.end()
1851
1857
1852
1858
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into the bit bucket so we measure templating, not output I/O
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    try:
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # FIX: the os.devnull handle used to be leaked; close it explicitly
        nullui.fout.close()
1895
1901
1896
1902
def _displaystats(ui, opts, entries, data):
    """Render min/percentile/max statistics for each measured series.

    ``entries`` is a list of ``(key, title)`` pairs; ``data[key]`` is a list
    of tuples whose first element is the measured value.  Each series is
    sorted in place and a percentile table is emitted through a formatter.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # FIX: percentile indices must be derived from the number of measured
        # values, not from the number of keys in `data` (which made every
        # percentile read from the first few sorted entries).
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1941
1947
1942
1948
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=None, **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    # FIX: the default used to be a mutable `revs=[]`; use a None sentinel
    # (backward compatible: an empty list still falls through to 'all()').
    if revs is None:
        revs = []
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the costly-to-compute columns when --timing is off
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2124
2130
2125
2131
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # FIX: the default used to be a mutable `revs=[]`; use a None sentinel
    # (backward compatible: an empty list still falls through to 'all()').
    if revs is None:
        revs = []
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2264
2270
2265
2271
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build)
    fm.end()
2272
2278
2273
2279
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
2285
2291
2286
2292
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache inside a throwaway transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # FIX: release the repo lock even if loading, the transaction, or the
    # timed run raises; previously an exception leaked the lock.
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # mark dirty so write() actually serializes the cache each run
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2305
2311
2306
2312
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark path encoding for every entry currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load outside the timed section; only encoding is measured
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2320
2326
2321
2327
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop: pull text pairs off `q` and diff them until told to stop.

    A None item on the queue marks the end of a batch; the worker then parks
    on the `ready` condition until woken for the next round (or `done` is
    set). Each queue item is acknowledged with task_done(), including the
    None sentinel itself.
    """
    # blocks/xdiff never change for a worker's lifetime, so pick the diff
    # routine once instead of re-testing the flags on every pair.
    if xdiff:
        diffpair = mdiff.bdiff.xdiffblocks
    elif blocks:
        diffpair = mdiff.bdiff.blocks
    else:
        diffpair = mdiff.textdiff
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            diffpair(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2338
2344
2339 def _manifestrevision(repo, mnode):
2345 def _manifestrevision(repo, mnode):
2340 ml = repo.manifestlog
2346 ml = repo.manifestlog
2341
2347
2342 if util.safehasattr(ml, b'getstorage'):
2348 if util.safehasattr(ml, b'getstorage'):
2343 store = ml.getstorage(b'')
2349 store = ml.getstorage(b'')
2344 else:
2350 else:
2345 store = ml._revlog
2351 store = ml._revlog
2346
2352
2347 return store.revision(mnode)
2353 return store.revision(mnode)
2348
2354
2349
2355
2350 @command(
2356 @command(
2351 b'perf::bdiff|perfbdiff',
2357 b'perf::bdiff|perfbdiff',
2352 revlogopts
2358 revlogopts
2353 + formatteropts
2359 + formatteropts
2354 + [
2360 + [
2355 (
2361 (
2356 b'',
2362 b'',
2357 b'count',
2363 b'count',
2358 1,
2364 1,
2359 b'number of revisions to test (when using --startrev)',
2365 b'number of revisions to test (when using --startrev)',
2360 ),
2366 ),
2361 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2367 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2362 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2368 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2363 (b'', b'blocks', False, b'test computing diffs into blocks'),
2369 (b'', b'blocks', False, b'test computing diffs into blocks'),
2364 (b'', b'xdiff', False, b'use xdiff algorithm'),
2370 (b'', b'xdiff', False, b'use xdiff algorithm'),
2365 ],
2371 ],
2366 b'-c|-m|FILE REV',
2372 b'-c|-m|FILE REV',
2367 )
2373 )
2368 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2374 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2369 """benchmark a bdiff between revisions
2375 """benchmark a bdiff between revisions
2370
2376
2371 By default, benchmark a bdiff between its delta parent and itself.
2377 By default, benchmark a bdiff between its delta parent and itself.
2372
2378
2373 With ``--count``, benchmark bdiffs between delta parents and self for N
2379 With ``--count``, benchmark bdiffs between delta parents and self for N
2374 revisions starting at the specified revision.
2380 revisions starting at the specified revision.
2375
2381
2376 With ``--alldata``, assume the requested revision is a changeset and
2382 With ``--alldata``, assume the requested revision is a changeset and
2377 measure bdiffs for all changes related to that changeset (manifest
2383 measure bdiffs for all changes related to that changeset (manifest
2378 and filelogs).
2384 and filelogs).
2379 """
2385 """
2380 opts = _byteskwargs(opts)
2386 opts = _byteskwargs(opts)
2381
2387
2382 if opts[b'xdiff'] and not opts[b'blocks']:
2388 if opts[b'xdiff'] and not opts[b'blocks']:
2383 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2389 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2384
2390
2385 if opts[b'alldata']:
2391 if opts[b'alldata']:
2386 opts[b'changelog'] = True
2392 opts[b'changelog'] = True
2387
2393
2388 if opts.get(b'changelog') or opts.get(b'manifest'):
2394 if opts.get(b'changelog') or opts.get(b'manifest'):
2389 file_, rev = None, file_
2395 file_, rev = None, file_
2390 elif rev is None:
2396 elif rev is None:
2391 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2397 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2392
2398
2393 blocks = opts[b'blocks']
2399 blocks = opts[b'blocks']
2394 xdiff = opts[b'xdiff']
2400 xdiff = opts[b'xdiff']
2395 textpairs = []
2401 textpairs = []
2396
2402
2397 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2403 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2398
2404
2399 startrev = r.rev(r.lookup(rev))
2405 startrev = r.rev(r.lookup(rev))
2400 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2406 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2401 if opts[b'alldata']:
2407 if opts[b'alldata']:
2402 # Load revisions associated with changeset.
2408 # Load revisions associated with changeset.
2403 ctx = repo[rev]
2409 ctx = repo[rev]
2404 mtext = _manifestrevision(repo, ctx.manifestnode())
2410 mtext = _manifestrevision(repo, ctx.manifestnode())
2405 for pctx in ctx.parents():
2411 for pctx in ctx.parents():
2406 pman = _manifestrevision(repo, pctx.manifestnode())
2412 pman = _manifestrevision(repo, pctx.manifestnode())
2407 textpairs.append((pman, mtext))
2413 textpairs.append((pman, mtext))
2408
2414
2409 # Load filelog revisions by iterating manifest delta.
2415 # Load filelog revisions by iterating manifest delta.
2410 man = ctx.manifest()
2416 man = ctx.manifest()
2411 pman = ctx.p1().manifest()
2417 pman = ctx.p1().manifest()
2412 for filename, change in pman.diff(man).items():
2418 for filename, change in pman.diff(man).items():
2413 fctx = repo.file(filename)
2419 fctx = repo.file(filename)
2414 f1 = fctx.revision(change[0][0] or -1)
2420 f1 = fctx.revision(change[0][0] or -1)
2415 f2 = fctx.revision(change[1][0] or -1)
2421 f2 = fctx.revision(change[1][0] or -1)
2416 textpairs.append((f1, f2))
2422 textpairs.append((f1, f2))
2417 else:
2423 else:
2418 dp = r.deltaparent(rev)
2424 dp = r.deltaparent(rev)
2419 textpairs.append((r.revision(dp), r.revision(rev)))
2425 textpairs.append((r.revision(dp), r.revision(rev)))
2420
2426
2421 withthreads = threads > 0
2427 withthreads = threads > 0
2422 if not withthreads:
2428 if not withthreads:
2423
2429
2424 def d():
2430 def d():
2425 for pair in textpairs:
2431 for pair in textpairs:
2426 if xdiff:
2432 if xdiff:
2427 mdiff.bdiff.xdiffblocks(*pair)
2433 mdiff.bdiff.xdiffblocks(*pair)
2428 elif blocks:
2434 elif blocks:
2429 mdiff.bdiff.blocks(*pair)
2435 mdiff.bdiff.blocks(*pair)
2430 else:
2436 else:
2431 mdiff.textdiff(*pair)
2437 mdiff.textdiff(*pair)
2432
2438
2433 else:
2439 else:
2434 q = queue()
2440 q = queue()
2435 for i in _xrange(threads):
2441 for i in _xrange(threads):
2436 q.put(None)
2442 q.put(None)
2437 ready = threading.Condition()
2443 ready = threading.Condition()
2438 done = threading.Event()
2444 done = threading.Event()
2439 for i in _xrange(threads):
2445 for i in _xrange(threads):
2440 threading.Thread(
2446 threading.Thread(
2441 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2447 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2442 ).start()
2448 ).start()
2443 q.join()
2449 q.join()
2444
2450
2445 def d():
2451 def d():
2446 for pair in textpairs:
2452 for pair in textpairs:
2447 q.put(pair)
2453 q.put(pair)
2448 for i in _xrange(threads):
2454 for i in _xrange(threads):
2449 q.put(None)
2455 q.put(None)
2450 with ready:
2456 with ready:
2451 ready.notify_all()
2457 ready.notify_all()
2452 q.join()
2458 q.join()
2453
2459
2454 timer, fm = gettimer(ui, opts)
2460 timer, fm = gettimer(ui, opts)
2455 timer(d)
2461 timer(d)
2456 fm.end()
2462 fm.end()
2457
2463
2458 if withthreads:
2464 if withthreads:
2459 done.set()
2465 done.set()
2460 for i in _xrange(threads):
2466 for i in _xrange(threads):
2461 q.put(None)
2467 q.put(None)
2462 with ready:
2468 with ready:
2463 ready.notify_all()
2469 ready.notify_all()
2464
2470
2465
2471
2466 @command(
2472 @command(
2467 b'perf::unidiff|perfunidiff',
2473 b'perf::unidiff|perfunidiff',
2468 revlogopts
2474 revlogopts
2469 + formatteropts
2475 + formatteropts
2470 + [
2476 + [
2471 (
2477 (
2472 b'',
2478 b'',
2473 b'count',
2479 b'count',
2474 1,
2480 1,
2475 b'number of revisions to test (when using --startrev)',
2481 b'number of revisions to test (when using --startrev)',
2476 ),
2482 ),
2477 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2483 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2478 ],
2484 ],
2479 b'-c|-m|FILE REV',
2485 b'-c|-m|FILE REV',
2480 )
2486 )
2481 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2487 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2482 """benchmark a unified diff between revisions
2488 """benchmark a unified diff between revisions
2483
2489
2484 This doesn't include any copy tracing - it's just a unified diff
2490 This doesn't include any copy tracing - it's just a unified diff
2485 of the texts.
2491 of the texts.
2486
2492
2487 By default, benchmark a diff between its delta parent and itself.
2493 By default, benchmark a diff between its delta parent and itself.
2488
2494
2489 With ``--count``, benchmark diffs between delta parents and self for N
2495 With ``--count``, benchmark diffs between delta parents and self for N
2490 revisions starting at the specified revision.
2496 revisions starting at the specified revision.
2491
2497
2492 With ``--alldata``, assume the requested revision is a changeset and
2498 With ``--alldata``, assume the requested revision is a changeset and
2493 measure diffs for all changes related to that changeset (manifest
2499 measure diffs for all changes related to that changeset (manifest
2494 and filelogs).
2500 and filelogs).
2495 """
2501 """
2496 opts = _byteskwargs(opts)
2502 opts = _byteskwargs(opts)
2497 if opts[b'alldata']:
2503 if opts[b'alldata']:
2498 opts[b'changelog'] = True
2504 opts[b'changelog'] = True
2499
2505
2500 if opts.get(b'changelog') or opts.get(b'manifest'):
2506 if opts.get(b'changelog') or opts.get(b'manifest'):
2501 file_, rev = None, file_
2507 file_, rev = None, file_
2502 elif rev is None:
2508 elif rev is None:
2503 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2509 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2504
2510
2505 textpairs = []
2511 textpairs = []
2506
2512
2507 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2513 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2508
2514
2509 startrev = r.rev(r.lookup(rev))
2515 startrev = r.rev(r.lookup(rev))
2510 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2516 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2511 if opts[b'alldata']:
2517 if opts[b'alldata']:
2512 # Load revisions associated with changeset.
2518 # Load revisions associated with changeset.
2513 ctx = repo[rev]
2519 ctx = repo[rev]
2514 mtext = _manifestrevision(repo, ctx.manifestnode())
2520 mtext = _manifestrevision(repo, ctx.manifestnode())
2515 for pctx in ctx.parents():
2521 for pctx in ctx.parents():
2516 pman = _manifestrevision(repo, pctx.manifestnode())
2522 pman = _manifestrevision(repo, pctx.manifestnode())
2517 textpairs.append((pman, mtext))
2523 textpairs.append((pman, mtext))
2518
2524
2519 # Load filelog revisions by iterating manifest delta.
2525 # Load filelog revisions by iterating manifest delta.
2520 man = ctx.manifest()
2526 man = ctx.manifest()
2521 pman = ctx.p1().manifest()
2527 pman = ctx.p1().manifest()
2522 for filename, change in pman.diff(man).items():
2528 for filename, change in pman.diff(man).items():
2523 fctx = repo.file(filename)
2529 fctx = repo.file(filename)
2524 f1 = fctx.revision(change[0][0] or -1)
2530 f1 = fctx.revision(change[0][0] or -1)
2525 f2 = fctx.revision(change[1][0] or -1)
2531 f2 = fctx.revision(change[1][0] or -1)
2526 textpairs.append((f1, f2))
2532 textpairs.append((f1, f2))
2527 else:
2533 else:
2528 dp = r.deltaparent(rev)
2534 dp = r.deltaparent(rev)
2529 textpairs.append((r.revision(dp), r.revision(rev)))
2535 textpairs.append((r.revision(dp), r.revision(rev)))
2530
2536
2531 def d():
2537 def d():
2532 for left, right in textpairs:
2538 for left, right in textpairs:
2533 # The date strings don't matter, so we pass empty strings.
2539 # The date strings don't matter, so we pass empty strings.
2534 headerlines, hunks = mdiff.unidiff(
2540 headerlines, hunks = mdiff.unidiff(
2535 left, b'', right, b'', b'left', b'right', binary=False
2541 left, b'', right, b'', b'left', b'right', binary=False
2536 )
2542 )
2537 # consume iterators in roughly the way patch.py does
2543 # consume iterators in roughly the way patch.py does
2538 b'\n'.join(headerlines)
2544 b'\n'.join(headerlines)
2539 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2545 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2540
2546
2541 timer, fm = gettimer(ui, opts)
2547 timer, fm = gettimer(ui, opts)
2542 timer(d)
2548 timer(d)
2543 fm.end()
2549 fm.end()
2544
2550
2545
2551
2546 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2552 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2547 def perfdiffwd(ui, repo, **opts):
2553 def perfdiffwd(ui, repo, **opts):
2548 """Profile diff of working directory changes"""
2554 """Profile diff of working directory changes"""
2549 opts = _byteskwargs(opts)
2555 opts = _byteskwargs(opts)
2550 timer, fm = gettimer(ui, opts)
2556 timer, fm = gettimer(ui, opts)
2551 options = {
2557 options = {
2552 'w': 'ignore_all_space',
2558 'w': 'ignore_all_space',
2553 'b': 'ignore_space_change',
2559 'b': 'ignore_space_change',
2554 'B': 'ignore_blank_lines',
2560 'B': 'ignore_blank_lines',
2555 }
2561 }
2556
2562
2557 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2563 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2558 opts = {options[c]: b'1' for c in diffopt}
2564 opts = {options[c]: b'1' for c in diffopt}
2559
2565
2560 def d():
2566 def d():
2561 ui.pushbuffer()
2567 ui.pushbuffer()
2562 commands.diff(ui, repo, **opts)
2568 commands.diff(ui, repo, **opts)
2563 ui.popbuffer()
2569 ui.popbuffer()
2564
2570
2565 diffopt = diffopt.encode('ascii')
2571 diffopt = diffopt.encode('ascii')
2566 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2572 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2567 timer(d, title=title)
2573 timer(d, title=title)
2568 fm.end()
2574 fm.end()
2569
2575
2570
2576
2571 @command(
2577 @command(
2572 b'perf::revlogindex|perfrevlogindex',
2578 b'perf::revlogindex|perfrevlogindex',
2573 revlogopts + formatteropts,
2579 revlogopts + formatteropts,
2574 b'-c|-m|FILE',
2580 b'-c|-m|FILE',
2575 )
2581 )
2576 def perfrevlogindex(ui, repo, file_=None, **opts):
2582 def perfrevlogindex(ui, repo, file_=None, **opts):
2577 """Benchmark operations against a revlog index.
2583 """Benchmark operations against a revlog index.
2578
2584
2579 This tests constructing a revlog instance, reading index data,
2585 This tests constructing a revlog instance, reading index data,
2580 parsing index data, and performing various operations related to
2586 parsing index data, and performing various operations related to
2581 index data.
2587 index data.
2582 """
2588 """
2583
2589
2584 opts = _byteskwargs(opts)
2590 opts = _byteskwargs(opts)
2585
2591
2586 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2592 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2587
2593
2588 opener = getattr(rl, 'opener') # trick linter
2594 opener = getattr(rl, 'opener') # trick linter
2589 indexfile = rl.indexfile
2595 indexfile = rl.indexfile
2590 data = opener.read(indexfile)
2596 data = opener.read(indexfile)
2591
2597
2592 header = struct.unpack(b'>I', data[0:4])[0]
2598 header = struct.unpack(b'>I', data[0:4])[0]
2593 version = header & 0xFFFF
2599 version = header & 0xFFFF
2594 if version == 1:
2600 if version == 1:
2595 revlogio = revlog.revlogio()
2601 revlogio = revlog.revlogio()
2596 inline = header & (1 << 16)
2602 inline = header & (1 << 16)
2597 else:
2603 else:
2598 raise error.Abort(b'unsupported revlog version: %d' % version)
2604 raise error.Abort(b'unsupported revlog version: %d' % version)
2599
2605
2600 rllen = len(rl)
2606 rllen = len(rl)
2601
2607
2602 node0 = rl.node(0)
2608 node0 = rl.node(0)
2603 node25 = rl.node(rllen // 4)
2609 node25 = rl.node(rllen // 4)
2604 node50 = rl.node(rllen // 2)
2610 node50 = rl.node(rllen // 2)
2605 node75 = rl.node(rllen // 4 * 3)
2611 node75 = rl.node(rllen // 4 * 3)
2606 node100 = rl.node(rllen - 1)
2612 node100 = rl.node(rllen - 1)
2607
2613
2608 allrevs = range(rllen)
2614 allrevs = range(rllen)
2609 allrevsrev = list(reversed(allrevs))
2615 allrevsrev = list(reversed(allrevs))
2610 allnodes = [rl.node(rev) for rev in range(rllen)]
2616 allnodes = [rl.node(rev) for rev in range(rllen)]
2611 allnodesrev = list(reversed(allnodes))
2617 allnodesrev = list(reversed(allnodes))
2612
2618
2613 def constructor():
2619 def constructor():
2614 revlog.revlog(opener, indexfile)
2620 revlog.revlog(opener, indexfile)
2615
2621
2616 def read():
2622 def read():
2617 with opener(indexfile) as fh:
2623 with opener(indexfile) as fh:
2618 fh.read()
2624 fh.read()
2619
2625
2620 def parseindex():
2626 def parseindex():
2621 revlogio.parseindex(data, inline)
2627 revlogio.parseindex(data, inline)
2622
2628
2623 def getentry(revornode):
2629 def getentry(revornode):
2624 index = revlogio.parseindex(data, inline)[0]
2630 index = revlogio.parseindex(data, inline)[0]
2625 index[revornode]
2631 index[revornode]
2626
2632
2627 def getentries(revs, count=1):
2633 def getentries(revs, count=1):
2628 index = revlogio.parseindex(data, inline)[0]
2634 index = revlogio.parseindex(data, inline)[0]
2629
2635
2630 for i in range(count):
2636 for i in range(count):
2631 for rev in revs:
2637 for rev in revs:
2632 index[rev]
2638 index[rev]
2633
2639
2634 def resolvenode(node):
2640 def resolvenode(node):
2635 index = revlogio.parseindex(data, inline)[0]
2641 index = revlogio.parseindex(data, inline)[0]
2636 rev = getattr(index, 'rev', None)
2642 rev = getattr(index, 'rev', None)
2637 if rev is None:
2643 if rev is None:
2638 nodemap = getattr(
2644 nodemap = getattr(
2639 revlogio.parseindex(data, inline)[0], 'nodemap', None
2645 revlogio.parseindex(data, inline)[0], 'nodemap', None
2640 )
2646 )
2641 # This only works for the C code.
2647 # This only works for the C code.
2642 if nodemap is None:
2648 if nodemap is None:
2643 return
2649 return
2644 rev = nodemap.__getitem__
2650 rev = nodemap.__getitem__
2645
2651
2646 try:
2652 try:
2647 rev(node)
2653 rev(node)
2648 except error.RevlogError:
2654 except error.RevlogError:
2649 pass
2655 pass
2650
2656
2651 def resolvenodes(nodes, count=1):
2657 def resolvenodes(nodes, count=1):
2652 index = revlogio.parseindex(data, inline)[0]
2658 index = revlogio.parseindex(data, inline)[0]
2653 rev = getattr(index, 'rev', None)
2659 rev = getattr(index, 'rev', None)
2654 if rev is None:
2660 if rev is None:
2655 nodemap = getattr(
2661 nodemap = getattr(
2656 revlogio.parseindex(data, inline)[0], 'nodemap', None
2662 revlogio.parseindex(data, inline)[0], 'nodemap', None
2657 )
2663 )
2658 # This only works for the C code.
2664 # This only works for the C code.
2659 if nodemap is None:
2665 if nodemap is None:
2660 return
2666 return
2661 rev = nodemap.__getitem__
2667 rev = nodemap.__getitem__
2662
2668
2663 for i in range(count):
2669 for i in range(count):
2664 for node in nodes:
2670 for node in nodes:
2665 try:
2671 try:
2666 rev(node)
2672 rev(node)
2667 except error.RevlogError:
2673 except error.RevlogError:
2668 pass
2674 pass
2669
2675
2670 benches = [
2676 benches = [
2671 (constructor, b'revlog constructor'),
2677 (constructor, b'revlog constructor'),
2672 (read, b'read'),
2678 (read, b'read'),
2673 (parseindex, b'create index object'),
2679 (parseindex, b'create index object'),
2674 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2680 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2675 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2681 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2676 (lambda: resolvenode(node0), b'look up node at rev 0'),
2682 (lambda: resolvenode(node0), b'look up node at rev 0'),
2677 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2683 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2678 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2684 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2679 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2685 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2680 (lambda: resolvenode(node100), b'look up node at tip'),
2686 (lambda: resolvenode(node100), b'look up node at tip'),
2681 # 2x variation is to measure caching impact.
2687 # 2x variation is to measure caching impact.
2682 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2688 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2683 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2689 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2684 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2690 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2685 (
2691 (
2686 lambda: resolvenodes(allnodesrev, 2),
2692 lambda: resolvenodes(allnodesrev, 2),
2687 b'look up all nodes 2x (reverse)',
2693 b'look up all nodes 2x (reverse)',
2688 ),
2694 ),
2689 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2695 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2690 (
2696 (
2691 lambda: getentries(allrevs, 2),
2697 lambda: getentries(allrevs, 2),
2692 b'retrieve all index entries 2x (forward)',
2698 b'retrieve all index entries 2x (forward)',
2693 ),
2699 ),
2694 (
2700 (
2695 lambda: getentries(allrevsrev),
2701 lambda: getentries(allrevsrev),
2696 b'retrieve all index entries (reverse)',
2702 b'retrieve all index entries (reverse)',
2697 ),
2703 ),
2698 (
2704 (
2699 lambda: getentries(allrevsrev, 2),
2705 lambda: getentries(allrevsrev, 2),
2700 b'retrieve all index entries 2x (reverse)',
2706 b'retrieve all index entries 2x (reverse)',
2701 ),
2707 ),
2702 ]
2708 ]
2703
2709
2704 for fn, title in benches:
2710 for fn, title in benches:
2705 timer, fm = gettimer(ui, opts)
2711 timer, fm = gettimer(ui, opts)
2706 timer(fn, title=title)
2712 timer(fn, title=title)
2707 fm.end()
2713 fm.end()
2708
2714
2709
2715
2710 @command(
2716 @command(
2711 b'perf::revlogrevisions|perfrevlogrevisions',
2717 b'perf::revlogrevisions|perfrevlogrevisions',
2712 revlogopts
2718 revlogopts
2713 + formatteropts
2719 + formatteropts
2714 + [
2720 + [
2715 (b'd', b'dist', 100, b'distance between the revisions'),
2721 (b'd', b'dist', 100, b'distance between the revisions'),
2716 (b's', b'startrev', 0, b'revision to start reading at'),
2722 (b's', b'startrev', 0, b'revision to start reading at'),
2717 (b'', b'reverse', False, b'read in reverse'),
2723 (b'', b'reverse', False, b'read in reverse'),
2718 ],
2724 ],
2719 b'-c|-m|FILE',
2725 b'-c|-m|FILE',
2720 )
2726 )
2721 def perfrevlogrevisions(
2727 def perfrevlogrevisions(
2722 ui, repo, file_=None, startrev=0, reverse=False, **opts
2728 ui, repo, file_=None, startrev=0, reverse=False, **opts
2723 ):
2729 ):
2724 """Benchmark reading a series of revisions from a revlog.
2730 """Benchmark reading a series of revisions from a revlog.
2725
2731
2726 By default, we read every ``-d/--dist`` revision from 0 to tip of
2732 By default, we read every ``-d/--dist`` revision from 0 to tip of
2727 the specified revlog.
2733 the specified revlog.
2728
2734
2729 The start revision can be defined via ``-s/--startrev``.
2735 The start revision can be defined via ``-s/--startrev``.
2730 """
2736 """
2731 opts = _byteskwargs(opts)
2737 opts = _byteskwargs(opts)
2732
2738
2733 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2739 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2734 rllen = getlen(ui)(rl)
2740 rllen = getlen(ui)(rl)
2735
2741
2736 if startrev < 0:
2742 if startrev < 0:
2737 startrev = rllen + startrev
2743 startrev = rllen + startrev
2738
2744
2739 def d():
2745 def d():
2740 rl.clearcaches()
2746 rl.clearcaches()
2741
2747
2742 beginrev = startrev
2748 beginrev = startrev
2743 endrev = rllen
2749 endrev = rllen
2744 dist = opts[b'dist']
2750 dist = opts[b'dist']
2745
2751
2746 if reverse:
2752 if reverse:
2747 beginrev, endrev = endrev - 1, beginrev - 1
2753 beginrev, endrev = endrev - 1, beginrev - 1
2748 dist = -1 * dist
2754 dist = -1 * dist
2749
2755
2750 for x in _xrange(beginrev, endrev, dist):
2756 for x in _xrange(beginrev, endrev, dist):
2751 # Old revisions don't support passing int.
2757 # Old revisions don't support passing int.
2752 n = rl.node(x)
2758 n = rl.node(x)
2753 rl.revision(n)
2759 rl.revision(n)
2754
2760
2755 timer, fm = gettimer(ui, opts)
2761 timer, fm = gettimer(ui, opts)
2756 timer(d)
2762 timer(d)
2757 fm.end()
2763 fm.end()
2758
2764
2759
2765
2760 @command(
2766 @command(
2761 b'perf::revlogwrite|perfrevlogwrite',
2767 b'perf::revlogwrite|perfrevlogwrite',
2762 revlogopts
2768 revlogopts
2763 + formatteropts
2769 + formatteropts
2764 + [
2770 + [
2765 (b's', b'startrev', 1000, b'revision to start writing at'),
2771 (b's', b'startrev', 1000, b'revision to start writing at'),
2766 (b'', b'stoprev', -1, b'last revision to write'),
2772 (b'', b'stoprev', -1, b'last revision to write'),
2767 (b'', b'count', 3, b'number of passes to perform'),
2773 (b'', b'count', 3, b'number of passes to perform'),
2768 (b'', b'details', False, b'print timing for every revisions tested'),
2774 (b'', b'details', False, b'print timing for every revisions tested'),
2769 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2775 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2770 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2776 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2771 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2777 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2772 ],
2778 ],
2773 b'-c|-m|FILE',
2779 b'-c|-m|FILE',
2774 )
2780 )
2775 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2781 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2776 """Benchmark writing a series of revisions to a revlog.
2782 """Benchmark writing a series of revisions to a revlog.
2777
2783
2778 Possible source values are:
2784 Possible source values are:
2779 * `full`: add from a full text (default).
2785 * `full`: add from a full text (default).
2780 * `parent-1`: add from a delta to the first parent
2786 * `parent-1`: add from a delta to the first parent
2781 * `parent-2`: add from a delta to the second parent if it exists
2787 * `parent-2`: add from a delta to the second parent if it exists
2782 (use a delta from the first parent otherwise)
2788 (use a delta from the first parent otherwise)
2783 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2789 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2784 * `storage`: add from the existing precomputed deltas
2790 * `storage`: add from the existing precomputed deltas
2785
2791
2786 Note: This performance command measures performance in a custom way. As a
2792 Note: This performance command measures performance in a custom way. As a
2787 result some of the global configuration of the 'perf' command does not
2793 result some of the global configuration of the 'perf' command does not
2788 apply to it:
2794 apply to it:
2789
2795
2790 * ``pre-run``: disabled
2796 * ``pre-run``: disabled
2791
2797
2792 * ``profile-benchmark``: disabled
2798 * ``profile-benchmark``: disabled
2793
2799
2794 * ``run-limits``: disabled use --count instead
2800 * ``run-limits``: disabled use --count instead
2795 """
2801 """
2796 opts = _byteskwargs(opts)
2802 opts = _byteskwargs(opts)
2797
2803
2798 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2804 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2799 rllen = getlen(ui)(rl)
2805 rllen = getlen(ui)(rl)
2800 if startrev < 0:
2806 if startrev < 0:
2801 startrev = rllen + startrev
2807 startrev = rllen + startrev
2802 if stoprev < 0:
2808 if stoprev < 0:
2803 stoprev = rllen + stoprev
2809 stoprev = rllen + stoprev
2804
2810
2805 lazydeltabase = opts['lazydeltabase']
2811 lazydeltabase = opts['lazydeltabase']
2806 source = opts['source']
2812 source = opts['source']
2807 clearcaches = opts['clear_caches']
2813 clearcaches = opts['clear_caches']
2808 validsource = (
2814 validsource = (
2809 b'full',
2815 b'full',
2810 b'parent-1',
2816 b'parent-1',
2811 b'parent-2',
2817 b'parent-2',
2812 b'parent-smallest',
2818 b'parent-smallest',
2813 b'storage',
2819 b'storage',
2814 )
2820 )
2815 if source not in validsource:
2821 if source not in validsource:
2816 raise error.Abort('invalid source type: %s' % source)
2822 raise error.Abort('invalid source type: %s' % source)
2817
2823
2818 ### actually gather results
2824 ### actually gather results
2819 count = opts['count']
2825 count = opts['count']
2820 if count <= 0:
2826 if count <= 0:
2821 raise error.Abort('invalide run count: %d' % count)
2827 raise error.Abort('invalide run count: %d' % count)
2822 allresults = []
2828 allresults = []
2823 for c in range(count):
2829 for c in range(count):
2824 timing = _timeonewrite(
2830 timing = _timeonewrite(
2825 ui,
2831 ui,
2826 rl,
2832 rl,
2827 source,
2833 source,
2828 startrev,
2834 startrev,
2829 stoprev,
2835 stoprev,
2830 c + 1,
2836 c + 1,
2831 lazydeltabase=lazydeltabase,
2837 lazydeltabase=lazydeltabase,
2832 clearcaches=clearcaches,
2838 clearcaches=clearcaches,
2833 )
2839 )
2834 allresults.append(timing)
2840 allresults.append(timing)
2835
2841
2836 ### consolidate the results in a single list
2842 ### consolidate the results in a single list
2837 results = []
2843 results = []
2838 for idx, (rev, t) in enumerate(allresults[0]):
2844 for idx, (rev, t) in enumerate(allresults[0]):
2839 ts = [t]
2845 ts = [t]
2840 for other in allresults[1:]:
2846 for other in allresults[1:]:
2841 orev, ot = other[idx]
2847 orev, ot = other[idx]
2842 assert orev == rev
2848 assert orev == rev
2843 ts.append(ot)
2849 ts.append(ot)
2844 results.append((rev, ts))
2850 results.append((rev, ts))
2845 resultcount = len(results)
2851 resultcount = len(results)
2846
2852
2847 ### Compute and display relevant statistics
2853 ### Compute and display relevant statistics
2848
2854
2849 # get a formatter
2855 # get a formatter
2850 fm = ui.formatter(b'perf', opts)
2856 fm = ui.formatter(b'perf', opts)
2851 displayall = ui.configbool(b"perf", b"all-timing", False)
2857 displayall = ui.configbool(b"perf", b"all-timing", False)
2852
2858
2853 # print individual details if requested
2859 # print individual details if requested
2854 if opts['details']:
2860 if opts['details']:
2855 for idx, item in enumerate(results, 1):
2861 for idx, item in enumerate(results, 1):
2856 rev, data = item
2862 rev, data = item
2857 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2863 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2858 formatone(fm, data, title=title, displayall=displayall)
2864 formatone(fm, data, title=title, displayall=displayall)
2859
2865
2860 # sorts results by median time
2866 # sorts results by median time
2861 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2867 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2862 # list of (name, index) to display)
2868 # list of (name, index) to display)
2863 relevants = [
2869 relevants = [
2864 ("min", 0),
2870 ("min", 0),
2865 ("10%", resultcount * 10 // 100),
2871 ("10%", resultcount * 10 // 100),
2866 ("25%", resultcount * 25 // 100),
2872 ("25%", resultcount * 25 // 100),
2867 ("50%", resultcount * 70 // 100),
2873 ("50%", resultcount * 70 // 100),
2868 ("75%", resultcount * 75 // 100),
2874 ("75%", resultcount * 75 // 100),
2869 ("90%", resultcount * 90 // 100),
2875 ("90%", resultcount * 90 // 100),
2870 ("95%", resultcount * 95 // 100),
2876 ("95%", resultcount * 95 // 100),
2871 ("99%", resultcount * 99 // 100),
2877 ("99%", resultcount * 99 // 100),
2872 ("99.9%", resultcount * 999 // 1000),
2878 ("99.9%", resultcount * 999 // 1000),
2873 ("99.99%", resultcount * 9999 // 10000),
2879 ("99.99%", resultcount * 9999 // 10000),
2874 ("99.999%", resultcount * 99999 // 100000),
2880 ("99.999%", resultcount * 99999 // 100000),
2875 ("max", -1),
2881 ("max", -1),
2876 ]
2882 ]
2877 if not ui.quiet:
2883 if not ui.quiet:
2878 for name, idx in relevants:
2884 for name, idx in relevants:
2879 data = results[idx]
2885 data = results[idx]
2880 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2886 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2881 formatone(fm, data[1], title=title, displayall=displayall)
2887 formatone(fm, data[1], title=title, displayall=displayall)
2882
2888
2883 # XXX summing that many float will not be very precise, we ignore this fact
2889 # XXX summing that many float will not be very precise, we ignore this fact
2884 # for now
2890 # for now
2885 totaltime = []
2891 totaltime = []
2886 for item in allresults:
2892 for item in allresults:
2887 totaltime.append(
2893 totaltime.append(
2888 (
2894 (
2889 sum(x[1][0] for x in item),
2895 sum(x[1][0] for x in item),
2890 sum(x[1][1] for x in item),
2896 sum(x[1][1] for x in item),
2891 sum(x[1][2] for x in item),
2897 sum(x[1][2] for x in item),
2892 )
2898 )
2893 )
2899 )
2894 formatone(
2900 formatone(
2895 fm,
2901 fm,
2896 totaltime,
2902 totaltime,
2897 title="total time (%d revs)" % resultcount,
2903 title="total time (%d revs)" % resultcount,
2898 displayall=displayall,
2904 displayall=displayall,
2899 )
2905 )
2900 fm.end()
2906 fm.end()
2901
2907
2902
2908
2903 class _faketr(object):
2909 class _faketr(object):
2904 def add(s, x, y, z=None):
2910 def add(s, x, y, z=None):
2905 return None
2911 return None
2906
2912
2907
2913
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions [startrev, stoprev] of revlog ``orig`` into a fresh
    temporary revlog and time each ``addrawrevision`` call.

    ``source`` selects how each revision's payload is rebuilt (full text,
    delta against a parent, ...) — see ``_getrevisionseed``.  ``runidx`` is
    only used to label the progress bar.  Returns a list of
    (rev, timing-tuple) pairs, one per re-added revision.
    """
    timings = []
    # no real transaction is needed: _faketr silently drops journal entries
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build args for addrawrevision outside of the timed section
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the actual write is measured
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2957
2963
2958
2964
def _getrevisionseed(orig, rev, tr, source):
    """Return the (args, kwargs) pair needed to re-add revision ``rev``
    of ``orig`` via ``addrawrevision``.

    ``source`` picks the payload strategy: b'full' supplies the fulltext,
    the b'parent-*' variants supply a cached delta against a parent, and
    b'storage' reuses the delta base stored in the revlog itself.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    parent1, parent2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)

    text = None
    cachedelta = None
    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(parent1), orig.revdiff(parent1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        base = parent1 if parent2 == nullid else parent2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        base, diff = parent1, orig.revdiff(parent1, rev)
        if parent2 != nullid:
            otherdiff = orig.revdiff(parent2, rev)
            if len(diff) > len(otherdiff):
                base, diff = parent2, otherdiff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, parent1, parent2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2999
3005
3000
3006
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a temporary copy of revlog ``orig``
    truncated just before ``truncaterev``.

    The index and data files are copied into a throwaway directory and
    truncated so that revisions >= ``truncaterev`` can be re-added by the
    caller.  The directory is removed on exit.  Inline revlogs are not
    supported (index and data share one file, so byte-offset truncation
    as done here would not work).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward upperboundcomp when the installed revlog version supports it
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # 'ab' keeps existing bytes; seek(0) + truncate cuts at the
        # computed offset (index entries are fixed-size: rev * entry size)
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3051
3057
3052
3058
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default: every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the file that holds the chunk data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # one-element list used as a mutable cell so dochunkbatch can hand the
    # decompressed chunks over to the docompress benchmarks below
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    # NOTE: the compress benchmarks rely on 'chunk batch' having run first
    # to populate chunks[0]; keep the ordering below
    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3180
3186
3181
3187
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice the raw on-disk segments back into per-revision chunks;
        # locals are hoisted for speed in the inner loop
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): the `chain` parameter is unused — the loop closes
        # over `slicedchain` computed below
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved modules across versions; support both locations
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # precompute the inputs each phase benchmark consumes
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3323
3329
3324
3330
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clean option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run_once():
        # optionally drop volatile caches so each run recomputes them
        if clear:
            repo.invalidatevolatilesets()
        # --contexts materializes a changectx per revision instead of
        # plain revision numbers
        iterator = repo.set(expr) if contexts else repo.revs(expr)
        for _ in iterator:
            pass

    timer(run_once)
    fm.end()
3356
3362
3357
3363
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # use the unfiltered repo so every filter level can be recomputed
    repo = repo.unfiltered()

    def getobs(name):
        # build a benchmark closure for one obsolescence set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a benchmark closure for one repoview filter level
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
3405
3411
3406
3412
3407 @command(
3413 @command(
3408 b'perf::branchmap|perfbranchmap',
3414 b'perf::branchmap|perfbranchmap',
3409 [
3415 [
3410 (b'f', b'full', False, b'Includes build time of subset'),
3416 (b'f', b'full', False, b'Includes build time of subset'),
3411 (
3417 (
3412 b'',
3418 b'',
3413 b'clear-revbranch',
3419 b'clear-revbranch',
3414 False,
3420 False,
3415 b'purge the revbranch cache between computation',
3421 b'purge the revbranch cache between computation',
3416 ),
3422 ),
3417 ]
3423 ]
3418 + formatteropts,
3424 + formatteropts,
3419 )
3425 )
3420 def perfbranchmap(ui, repo, *filternames, **opts):
3426 def perfbranchmap(ui, repo, *filternames, **opts):
3421 """benchmark the update of a branchmap
3427 """benchmark the update of a branchmap
3422
3428
3423 This benchmarks the full repo.branchmap() call with read and write disabled
3429 This benchmarks the full repo.branchmap() call with read and write disabled
3424 """
3430 """
3425 opts = _byteskwargs(opts)
3431 opts = _byteskwargs(opts)
3426 full = opts.get(b"full", False)
3432 full = opts.get(b"full", False)
3427 clear_revbranch = opts.get(b"clear_revbranch", False)
3433 clear_revbranch = opts.get(b"clear_revbranch", False)
3428 timer, fm = gettimer(ui, opts)
3434 timer, fm = gettimer(ui, opts)
3429
3435
3430 def getbranchmap(filtername):
3436 def getbranchmap(filtername):
3431 """generate a benchmark function for the filtername"""
3437 """generate a benchmark function for the filtername"""
3432 if filtername is None:
3438 if filtername is None:
3433 view = repo
3439 view = repo
3434 else:
3440 else:
3435 view = repo.filtered(filtername)
3441 view = repo.filtered(filtername)
3436 if util.safehasattr(view._branchcaches, '_per_filter'):
3442 if util.safehasattr(view._branchcaches, '_per_filter'):
3437 filtered = view._branchcaches._per_filter
3443 filtered = view._branchcaches._per_filter
3438 else:
3444 else:
3439 # older versions
3445 # older versions
3440 filtered = view._branchcaches
3446 filtered = view._branchcaches
3441
3447
3442 def d():
3448 def d():
3443 if clear_revbranch:
3449 if clear_revbranch:
3444 repo.revbranchcache()._clear()
3450 repo.revbranchcache()._clear()
3445 if full:
3451 if full:
3446 view._branchcaches.clear()
3452 view._branchcaches.clear()
3447 else:
3453 else:
3448 filtered.pop(filtername, None)
3454 filtered.pop(filtername, None)
3449 view.branchmap()
3455 view.branchmap()
3450
3456
3451 return d
3457 return d
3452
3458
3453 # add filter in smaller subset to bigger subset
3459 # add filter in smaller subset to bigger subset
3454 possiblefilters = set(repoview.filtertable)
3460 possiblefilters = set(repoview.filtertable)
3455 if filternames:
3461 if filternames:
3456 possiblefilters &= set(filternames)
3462 possiblefilters &= set(filternames)
3457 subsettable = getbranchmapsubsettable()
3463 subsettable = getbranchmapsubsettable()
3458 allfilters = []
3464 allfilters = []
3459 while possiblefilters:
3465 while possiblefilters:
3460 for name in possiblefilters:
3466 for name in possiblefilters:
3461 subset = subsettable.get(name)
3467 subset = subsettable.get(name)
3462 if subset not in possiblefilters:
3468 if subset not in possiblefilters:
3463 break
3469 break
3464 else:
3470 else:
3465 assert False, b'subset cycle %s!' % possiblefilters
3471 assert False, b'subset cycle %s!' % possiblefilters
3466 allfilters.append(name)
3472 allfilters.append(name)
3467 possiblefilters.remove(name)
3473 possiblefilters.remove(name)
3468
3474
3469 # warm the cache
3475 # warm the cache
3470 if not full:
3476 if not full:
3471 for name in allfilters:
3477 for name in allfilters:
3472 repo.filtered(name).branchmap()
3478 repo.filtered(name).branchmap()
3473 if not filternames or b'unfiltered' in filternames:
3479 if not filternames or b'unfiltered' in filternames:
3474 # add unfiltered
3480 # add unfiltered
3475 allfilters.append(None)
3481 allfilters.append(None)
3476
3482
3477 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3483 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3478 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3484 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3479 branchcacheread.set(classmethod(lambda *args: None))
3485 branchcacheread.set(classmethod(lambda *args: None))
3480 else:
3486 else:
3481 # older versions
3487 # older versions
3482 branchcacheread = safeattrsetter(branchmap, b'read')
3488 branchcacheread = safeattrsetter(branchmap, b'read')
3483 branchcacheread.set(lambda *args: None)
3489 branchcacheread.set(lambda *args: None)
3484 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3490 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3485 branchcachewrite.set(lambda *args: None)
3491 branchcachewrite.set(lambda *args: None)
3486 try:
3492 try:
3487 for name in allfilters:
3493 for name in allfilters:
3488 printname = name
3494 printname = name
3489 if name is None:
3495 if name is None:
3490 printname = b'unfiltered'
3496 printname = b'unfiltered'
3491 timer(getbranchmap(name), title=printname)
3497 timer(getbranchmap(name), title=printname)
3492 finally:
3498 finally:
3493 branchcacheread.restore()
3499 branchcacheread.restore()
3494 branchcachewrite.restore()
3500 branchcachewrite.restore()
3495 fm.end()
3501 fm.end()
3496
3502
3497
3503
3498 @command(
3504 @command(
3499 b'perf::branchmapupdate|perfbranchmapupdate',
3505 b'perf::branchmapupdate|perfbranchmapupdate',
3500 [
3506 [
3501 (b'', b'base', [], b'subset of revision to start from'),
3507 (b'', b'base', [], b'subset of revision to start from'),
3502 (b'', b'target', [], b'subset of revision to end with'),
3508 (b'', b'target', [], b'subset of revision to end with'),
3503 (b'', b'clear-caches', False, b'clear cache between each runs'),
3509 (b'', b'clear-caches', False, b'clear cache between each runs'),
3504 ]
3510 ]
3505 + formatteropts,
3511 + formatteropts,
3506 )
3512 )
3507 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3513 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3508 """benchmark branchmap update from for <base> revs to <target> revs
3514 """benchmark branchmap update from for <base> revs to <target> revs
3509
3515
3510 If `--clear-caches` is passed, the following items will be reset before
3516 If `--clear-caches` is passed, the following items will be reset before
3511 each update:
3517 each update:
3512 * the changelog instance and associated indexes
3518 * the changelog instance and associated indexes
3513 * the rev-branch-cache instance
3519 * the rev-branch-cache instance
3514
3520
3515 Examples:
3521 Examples:
3516
3522
3517 # update for the one last revision
3523 # update for the one last revision
3518 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3524 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3519
3525
3520 $ update for change coming with a new branch
3526 $ update for change coming with a new branch
3521 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3527 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3522 """
3528 """
3523 from mercurial import branchmap
3529 from mercurial import branchmap
3524 from mercurial import repoview
3530 from mercurial import repoview
3525
3531
3526 opts = _byteskwargs(opts)
3532 opts = _byteskwargs(opts)
3527 timer, fm = gettimer(ui, opts)
3533 timer, fm = gettimer(ui, opts)
3528 clearcaches = opts[b'clear_caches']
3534 clearcaches = opts[b'clear_caches']
3529 unfi = repo.unfiltered()
3535 unfi = repo.unfiltered()
3530 x = [None] # used to pass data between closure
3536 x = [None] # used to pass data between closure
3531
3537
3532 # we use a `list` here to avoid possible side effect from smartset
3538 # we use a `list` here to avoid possible side effect from smartset
3533 baserevs = list(scmutil.revrange(repo, base))
3539 baserevs = list(scmutil.revrange(repo, base))
3534 targetrevs = list(scmutil.revrange(repo, target))
3540 targetrevs = list(scmutil.revrange(repo, target))
3535 if not baserevs:
3541 if not baserevs:
3536 raise error.Abort(b'no revisions selected for --base')
3542 raise error.Abort(b'no revisions selected for --base')
3537 if not targetrevs:
3543 if not targetrevs:
3538 raise error.Abort(b'no revisions selected for --target')
3544 raise error.Abort(b'no revisions selected for --target')
3539
3545
3540 # make sure the target branchmap also contains the one in the base
3546 # make sure the target branchmap also contains the one in the base
3541 targetrevs = list(set(baserevs) | set(targetrevs))
3547 targetrevs = list(set(baserevs) | set(targetrevs))
3542 targetrevs.sort()
3548 targetrevs.sort()
3543
3549
3544 cl = repo.changelog
3550 cl = repo.changelog
3545 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3551 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3546 allbaserevs.sort()
3552 allbaserevs.sort()
3547 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3553 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3548
3554
3549 newrevs = list(alltargetrevs.difference(allbaserevs))
3555 newrevs = list(alltargetrevs.difference(allbaserevs))
3550 newrevs.sort()
3556 newrevs.sort()
3551
3557
3552 allrevs = frozenset(unfi.changelog.revs())
3558 allrevs = frozenset(unfi.changelog.revs())
3553 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3559 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3554 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3560 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3555
3561
3556 def basefilter(repo, visibilityexceptions=None):
3562 def basefilter(repo, visibilityexceptions=None):
3557 return basefilterrevs
3563 return basefilterrevs
3558
3564
3559 def targetfilter(repo, visibilityexceptions=None):
3565 def targetfilter(repo, visibilityexceptions=None):
3560 return targetfilterrevs
3566 return targetfilterrevs
3561
3567
3562 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3568 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3563 ui.status(msg % (len(allbaserevs), len(newrevs)))
3569 ui.status(msg % (len(allbaserevs), len(newrevs)))
3564 if targetfilterrevs:
3570 if targetfilterrevs:
3565 msg = b'(%d revisions still filtered)\n'
3571 msg = b'(%d revisions still filtered)\n'
3566 ui.status(msg % len(targetfilterrevs))
3572 ui.status(msg % len(targetfilterrevs))
3567
3573
3568 try:
3574 try:
3569 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3575 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3570 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3576 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3571
3577
3572 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3578 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3573 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3579 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3574
3580
3575 # try to find an existing branchmap to reuse
3581 # try to find an existing branchmap to reuse
3576 subsettable = getbranchmapsubsettable()
3582 subsettable = getbranchmapsubsettable()
3577 candidatefilter = subsettable.get(None)
3583 candidatefilter = subsettable.get(None)
3578 while candidatefilter is not None:
3584 while candidatefilter is not None:
3579 candidatebm = repo.filtered(candidatefilter).branchmap()
3585 candidatebm = repo.filtered(candidatefilter).branchmap()
3580 if candidatebm.validfor(baserepo):
3586 if candidatebm.validfor(baserepo):
3581 filtered = repoview.filterrevs(repo, candidatefilter)
3587 filtered = repoview.filterrevs(repo, candidatefilter)
3582 missing = [r for r in allbaserevs if r in filtered]
3588 missing = [r for r in allbaserevs if r in filtered]
3583 base = candidatebm.copy()
3589 base = candidatebm.copy()
3584 base.update(baserepo, missing)
3590 base.update(baserepo, missing)
3585 break
3591 break
3586 candidatefilter = subsettable.get(candidatefilter)
3592 candidatefilter = subsettable.get(candidatefilter)
3587 else:
3593 else:
3588 # no suitable subset where found
3594 # no suitable subset where found
3589 base = branchmap.branchcache()
3595 base = branchmap.branchcache()
3590 base.update(baserepo, allbaserevs)
3596 base.update(baserepo, allbaserevs)
3591
3597
3592 def setup():
3598 def setup():
3593 x[0] = base.copy()
3599 x[0] = base.copy()
3594 if clearcaches:
3600 if clearcaches:
3595 unfi._revbranchcache = None
3601 unfi._revbranchcache = None
3596 clearchangelog(repo)
3602 clearchangelog(repo)
3597
3603
3598 def bench():
3604 def bench():
3599 x[0].update(targetrepo, newrevs)
3605 x[0].update(targetrepo, newrevs)
3600
3606
3601 timer(bench, setup=setup)
3607 timer(bench, setup=setup)
3602 fm.end()
3608 fm.end()
3603 finally:
3609 finally:
3604 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3610 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3605 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3611 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3606
3612
3607
3613
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: just enumerate the on-disk branchmap cache files, no timing
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3666
3672
3667
3673
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def count_markers():
        # parsing happens lazily inside obsstore; len() forces a full read
        return len(obsolete.obsstore(repo, svfs))

    timer(count_markers)
    fm.end()
3677
3683
3678
3684
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark the util.lrucachedict implementation

    Times init, get, insert/set and mixed workloads; when --costlimit is
    non-zero the cost-aware variants of the benchmarks are run instead.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3833
3839
3834
3840
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # pre-build the whole line so only the write call itself is timed
        line = item * nitems + b'\n'

    def benchmark():
        for i in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                # inner loop variable renamed to avoid shadowing `i`
                for j in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
3876
3882
3877
3883
def uisetup(ui):
    """extension setup hook: shim openrevlog() on very old Mercurials"""
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3896
3902
3897
3903
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now