##// END OF EJS Templates
perf: don't turn byte to string when formatting perfbranchmap...
marmoute -
r46873:cdbde70e default draft
parent child Browse files
Show More
@@ -1,3915 +1,3915
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
123 def identity(a):
123 def identity(a):
124 return a
124 return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
177 def safehasattr(thing, attr):
177 def safehasattr(thing, attr):
178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
179
179
180
180
181 setattr(util, 'safehasattr', safehasattr)
181 setattr(util, 'safehasattr', safehasattr)
182
182
183 # for "historical portability":
183 # for "historical portability":
184 # define util.timer forcibly, because util.timer has been available
184 # define util.timer forcibly, because util.timer has been available
185 # since ae5d60bb70c9
185 # since ae5d60bb70c9
186 if safehasattr(time, 'perf_counter'):
186 if safehasattr(time, 'perf_counter'):
187 util.timer = time.perf_counter
187 util.timer = time.perf_counter
188 elif os.name == b'nt':
188 elif os.name == b'nt':
189 util.timer = time.clock
189 util.timer = time.clock
190 else:
190 else:
191 util.timer = time.time
191 util.timer = time.time
192
192
193 # for "historical portability":
193 # for "historical portability":
194 # use locally defined empty option list, if formatteropts isn't
194 # use locally defined empty option list, if formatteropts isn't
195 # available, because commands.formatteropts has been available since
195 # available, because commands.formatteropts has been available since
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 # available since 2.2 (or ae5f92e154d3)
197 # available since 2.2 (or ae5f92e154d3)
198 formatteropts = getattr(
198 formatteropts = getattr(
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 )
200 )
201
201
202 # for "historical portability":
202 # for "historical portability":
203 # use locally defined option list, if debugrevlogopts isn't available,
203 # use locally defined option list, if debugrevlogopts isn't available,
204 # because commands.debugrevlogopts has been available since 3.7 (or
204 # because commands.debugrevlogopts has been available since 3.7 (or
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 # since 1.9 (or a79fea6b3e77).
206 # since 1.9 (or a79fea6b3e77).
207 revlogopts = getattr(
207 revlogopts = getattr(
208 cmdutil,
208 cmdutil,
209 "debugrevlogopts",
209 "debugrevlogopts",
210 getattr(
210 getattr(
211 commands,
211 commands,
212 "debugrevlogopts",
212 "debugrevlogopts",
213 [
213 [
214 (b'c', b'changelog', False, b'open changelog'),
214 (b'c', b'changelog', False, b'open changelog'),
215 (b'm', b'manifest', False, b'open manifest'),
215 (b'm', b'manifest', False, b'open manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
217 ],
217 ],
218 ),
218 ),
219 )
219 )
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
226 def parsealiases(cmd):
226 def parsealiases(cmd):
227 return cmd.split(b"|")
227 return cmd.split(b"|")
228
228
229
229
230 if safehasattr(registrar, 'command'):
230 if safehasattr(registrar, 'command'):
231 command = registrar.command(cmdtable)
231 command = registrar.command(cmdtable)
232 elif safehasattr(cmdutil, 'command'):
232 elif safehasattr(cmdutil, 'command'):
233 command = cmdutil.command(cmdtable)
233 command = cmdutil.command(cmdtable)
234 if 'norepo' not in getargspec(command).args:
234 if 'norepo' not in getargspec(command).args:
235 # for "historical portability":
235 # for "historical portability":
236 # wrap original cmdutil.command, because "norepo" option has
236 # wrap original cmdutil.command, because "norepo" option has
237 # been available since 3.1 (or 75a96326cecb)
237 # been available since 3.1 (or 75a96326cecb)
238 _command = command
238 _command = command
239
239
240 def command(name, options=(), synopsis=None, norepo=False):
240 def command(name, options=(), synopsis=None, norepo=False):
241 if norepo:
241 if norepo:
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
243 return _command(name, list(options), synopsis)
243 return _command(name, list(options), synopsis)
244
244
245
245
246 else:
246 else:
247 # for "historical portability":
247 # for "historical portability":
248 # define "@command" annotation locally, because cmdutil.command
248 # define "@command" annotation locally, because cmdutil.command
249 # has been available since 1.9 (or 2daa5179e73f)
249 # has been available since 1.9 (or 2daa5179e73f)
250 def command(name, options=(), synopsis=None, norepo=False):
250 def command(name, options=(), synopsis=None, norepo=False):
251 def decorator(func):
251 def decorator(func):
252 if synopsis:
252 if synopsis:
253 cmdtable[name] = func, list(options), synopsis
253 cmdtable[name] = func, list(options), synopsis
254 else:
254 else:
255 cmdtable[name] = func, list(options)
255 cmdtable[name] = func, list(options)
256 if norepo:
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return func
258 return func
259
259
260 return decorator
260 return decorator
261
261
262
262
263 try:
263 try:
264 import mercurial.registrar
264 import mercurial.registrar
265 import mercurial.configitems
265 import mercurial.configitems
266
266
267 configtable = {}
267 configtable = {}
268 configitem = mercurial.registrar.configitem(configtable)
268 configitem = mercurial.registrar.configitem(configtable)
269 configitem(
269 configitem(
270 b'perf',
270 b'perf',
271 b'presleep',
271 b'presleep',
272 default=mercurial.configitems.dynamicdefault,
272 default=mercurial.configitems.dynamicdefault,
273 experimental=True,
273 experimental=True,
274 )
274 )
275 configitem(
275 configitem(
276 b'perf',
276 b'perf',
277 b'stub',
277 b'stub',
278 default=mercurial.configitems.dynamicdefault,
278 default=mercurial.configitems.dynamicdefault,
279 experimental=True,
279 experimental=True,
280 )
280 )
281 configitem(
281 configitem(
282 b'perf',
282 b'perf',
283 b'parentscount',
283 b'parentscount',
284 default=mercurial.configitems.dynamicdefault,
284 default=mercurial.configitems.dynamicdefault,
285 experimental=True,
285 experimental=True,
286 )
286 )
287 configitem(
287 configitem(
288 b'perf',
288 b'perf',
289 b'all-timing',
289 b'all-timing',
290 default=mercurial.configitems.dynamicdefault,
290 default=mercurial.configitems.dynamicdefault,
291 experimental=True,
291 experimental=True,
292 )
292 )
293 configitem(
293 configitem(
294 b'perf',
294 b'perf',
295 b'pre-run',
295 b'pre-run',
296 default=mercurial.configitems.dynamicdefault,
296 default=mercurial.configitems.dynamicdefault,
297 )
297 )
298 configitem(
298 configitem(
299 b'perf',
299 b'perf',
300 b'profile-benchmark',
300 b'profile-benchmark',
301 default=mercurial.configitems.dynamicdefault,
301 default=mercurial.configitems.dynamicdefault,
302 )
302 )
303 configitem(
303 configitem(
304 b'perf',
304 b'perf',
305 b'run-limits',
305 b'run-limits',
306 default=mercurial.configitems.dynamicdefault,
306 default=mercurial.configitems.dynamicdefault,
307 experimental=True,
307 experimental=True,
308 )
308 )
309 except (ImportError, AttributeError):
309 except (ImportError, AttributeError):
310 pass
310 pass
311 except TypeError:
311 except TypeError:
312 # compatibility fix for a11fd395e83f
312 # compatibility fix for a11fd395e83f
313 # hg version: 5.2
313 # hg version: 5.2
314 configitem(
314 configitem(
315 b'perf',
315 b'perf',
316 b'presleep',
316 b'presleep',
317 default=mercurial.configitems.dynamicdefault,
317 default=mercurial.configitems.dynamicdefault,
318 )
318 )
319 configitem(
319 configitem(
320 b'perf',
320 b'perf',
321 b'stub',
321 b'stub',
322 default=mercurial.configitems.dynamicdefault,
322 default=mercurial.configitems.dynamicdefault,
323 )
323 )
324 configitem(
324 configitem(
325 b'perf',
325 b'perf',
326 b'parentscount',
326 b'parentscount',
327 default=mercurial.configitems.dynamicdefault,
327 default=mercurial.configitems.dynamicdefault,
328 )
328 )
329 configitem(
329 configitem(
330 b'perf',
330 b'perf',
331 b'all-timing',
331 b'all-timing',
332 default=mercurial.configitems.dynamicdefault,
332 default=mercurial.configitems.dynamicdefault,
333 )
333 )
334 configitem(
334 configitem(
335 b'perf',
335 b'perf',
336 b'pre-run',
336 b'pre-run',
337 default=mercurial.configitems.dynamicdefault,
337 default=mercurial.configitems.dynamicdefault,
338 )
338 )
339 configitem(
339 configitem(
340 b'perf',
340 b'perf',
341 b'profile-benchmark',
341 b'profile-benchmark',
342 default=mercurial.configitems.dynamicdefault,
342 default=mercurial.configitems.dynamicdefault,
343 )
343 )
344 configitem(
344 configitem(
345 b'perf',
345 b'perf',
346 b'run-limits',
346 b'run-limits',
347 default=mercurial.configitems.dynamicdefault,
347 default=mercurial.configitems.dynamicdefault,
348 )
348 )
349
349
350
350
351 def getlen(ui):
351 def getlen(ui):
352 if ui.configbool(b"perf", b"stub", False):
352 if ui.configbool(b"perf", b"stub", False):
353 return lambda x: 1
353 return lambda x: 1
354 return len
354 return len
355
355
356
356
357 class noop(object):
357 class noop(object):
358 """dummy context manager"""
358 """dummy context manager"""
359
359
360 def __enter__(self):
360 def __enter__(self):
361 pass
361 pass
362
362
363 def __exit__(self, *args):
363 def __exit__(self, *args):
364 pass
364 pass
365
365
366
366
367 NOOPCTX = noop()
367 NOOPCTX = noop()
368
368
369
369
370 def gettimer(ui, opts=None):
370 def gettimer(ui, opts=None):
371 """return a timer function and formatter: (timer, formatter)
371 """return a timer function and formatter: (timer, formatter)
372
372
373 This function exists to gather the creation of formatter in a single
373 This function exists to gather the creation of formatter in a single
374 place instead of duplicating it in all performance commands."""
374 place instead of duplicating it in all performance commands."""
375
375
376 # enforce an idle period before execution to counteract power management
376 # enforce an idle period before execution to counteract power management
377 # experimental config: perf.presleep
377 # experimental config: perf.presleep
378 time.sleep(getint(ui, b"perf", b"presleep", 1))
378 time.sleep(getint(ui, b"perf", b"presleep", 1))
379
379
380 if opts is None:
380 if opts is None:
381 opts = {}
381 opts = {}
382 # redirect all to stderr unless buffer api is in use
382 # redirect all to stderr unless buffer api is in use
383 if not ui._buffers:
383 if not ui._buffers:
384 ui = ui.copy()
384 ui = ui.copy()
385 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
385 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
386 if uifout:
386 if uifout:
387 # for "historical portability":
387 # for "historical portability":
388 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
388 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
389 uifout.set(ui.ferr)
389 uifout.set(ui.ferr)
390
390
391 # get a formatter
391 # get a formatter
392 uiformatter = getattr(ui, 'formatter', None)
392 uiformatter = getattr(ui, 'formatter', None)
393 if uiformatter:
393 if uiformatter:
394 fm = uiformatter(b'perf', opts)
394 fm = uiformatter(b'perf', opts)
395 else:
395 else:
396 # for "historical portability":
396 # for "historical portability":
397 # define formatter locally, because ui.formatter has been
397 # define formatter locally, because ui.formatter has been
398 # available since 2.2 (or ae5f92e154d3)
398 # available since 2.2 (or ae5f92e154d3)
399 from mercurial import node
399 from mercurial import node
400
400
401 class defaultformatter(object):
401 class defaultformatter(object):
402 """Minimized composition of baseformatter and plainformatter"""
402 """Minimized composition of baseformatter and plainformatter"""
403
403
404 def __init__(self, ui, topic, opts):
404 def __init__(self, ui, topic, opts):
405 self._ui = ui
405 self._ui = ui
406 if ui.debugflag:
406 if ui.debugflag:
407 self.hexfunc = node.hex
407 self.hexfunc = node.hex
408 else:
408 else:
409 self.hexfunc = node.short
409 self.hexfunc = node.short
410
410
411 def __nonzero__(self):
411 def __nonzero__(self):
412 return False
412 return False
413
413
414 __bool__ = __nonzero__
414 __bool__ = __nonzero__
415
415
416 def startitem(self):
416 def startitem(self):
417 pass
417 pass
418
418
419 def data(self, **data):
419 def data(self, **data):
420 pass
420 pass
421
421
422 def write(self, fields, deftext, *fielddata, **opts):
422 def write(self, fields, deftext, *fielddata, **opts):
423 self._ui.write(deftext % fielddata, **opts)
423 self._ui.write(deftext % fielddata, **opts)
424
424
425 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
425 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
426 if cond:
426 if cond:
427 self._ui.write(deftext % fielddata, **opts)
427 self._ui.write(deftext % fielddata, **opts)
428
428
429 def plain(self, text, **opts):
429 def plain(self, text, **opts):
430 self._ui.write(text, **opts)
430 self._ui.write(text, **opts)
431
431
432 def end(self):
432 def end(self):
433 pass
433 pass
434
434
435 fm = defaultformatter(ui, b'perf', opts)
435 fm = defaultformatter(ui, b'perf', opts)
436
436
437 # stub function, runs code only once instead of in a loop
437 # stub function, runs code only once instead of in a loop
438 # experimental config: perf.stub
438 # experimental config: perf.stub
439 if ui.configbool(b"perf", b"stub", False):
439 if ui.configbool(b"perf", b"stub", False):
440 return functools.partial(stub_timer, fm), fm
440 return functools.partial(stub_timer, fm), fm
441
441
442 # experimental config: perf.all-timing
442 # experimental config: perf.all-timing
443 displayall = ui.configbool(b"perf", b"all-timing", False)
443 displayall = ui.configbool(b"perf", b"all-timing", False)
444
444
445 # experimental config: perf.run-limits
445 # experimental config: perf.run-limits
446 limitspec = ui.configlist(b"perf", b"run-limits", [])
446 limitspec = ui.configlist(b"perf", b"run-limits", [])
447 limits = []
447 limits = []
448 for item in limitspec:
448 for item in limitspec:
449 parts = item.split(b'-', 1)
449 parts = item.split(b'-', 1)
450 if len(parts) < 2:
450 if len(parts) < 2:
451 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
451 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
452 continue
452 continue
453 try:
453 try:
454 time_limit = float(_sysstr(parts[0]))
454 time_limit = float(_sysstr(parts[0]))
455 except ValueError as e:
455 except ValueError as e:
456 ui.warn(
456 ui.warn(
457 (
457 (
458 b'malformatted run limit entry, %s: %s\n'
458 b'malformatted run limit entry, %s: %s\n'
459 % (_bytestr(e), item)
459 % (_bytestr(e), item)
460 )
460 )
461 )
461 )
462 continue
462 continue
463 try:
463 try:
464 run_limit = int(_sysstr(parts[1]))
464 run_limit = int(_sysstr(parts[1]))
465 except ValueError as e:
465 except ValueError as e:
466 ui.warn(
466 ui.warn(
467 (
467 (
468 b'malformatted run limit entry, %s: %s\n'
468 b'malformatted run limit entry, %s: %s\n'
469 % (_bytestr(e), item)
469 % (_bytestr(e), item)
470 )
470 )
471 )
471 )
472 continue
472 continue
473 limits.append((time_limit, run_limit))
473 limits.append((time_limit, run_limit))
474 if not limits:
474 if not limits:
475 limits = DEFAULTLIMITS
475 limits = DEFAULTLIMITS
476
476
477 profiler = None
477 profiler = None
478 if profiling is not None:
478 if profiling is not None:
479 if ui.configbool(b"perf", b"profile-benchmark", False):
479 if ui.configbool(b"perf", b"profile-benchmark", False):
480 profiler = profiling.profile(ui)
480 profiler = profiling.profile(ui)
481
481
482 prerun = getint(ui, b"perf", b"pre-run", 0)
482 prerun = getint(ui, b"perf", b"pre-run", 0)
483 t = functools.partial(
483 t = functools.partial(
484 _timer,
484 _timer,
485 fm,
485 fm,
486 displayall=displayall,
486 displayall=displayall,
487 limits=limits,
487 limits=limits,
488 prerun=prerun,
488 prerun=prerun,
489 profiler=profiler,
489 profiler=profiler,
490 )
490 )
491 return t, fm
491 return t, fm
492
492
493
493
494 def stub_timer(fm, func, setup=None, title=None):
494 def stub_timer(fm, func, setup=None, title=None):
495 if setup is not None:
495 if setup is not None:
496 setup()
496 setup()
497 func()
497 func()
498
498
499
499
500 @contextlib.contextmanager
500 @contextlib.contextmanager
501 def timeone():
501 def timeone():
502 r = []
502 r = []
503 ostart = os.times()
503 ostart = os.times()
504 cstart = util.timer()
504 cstart = util.timer()
505 yield r
505 yield r
506 cstop = util.timer()
506 cstop = util.timer()
507 ostop = os.times()
507 ostop = os.times()
508 a, b = ostart, ostop
508 a, b = ostart, ostop
509 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
509 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
510
510
511
511
512 # list of stop condition (elapsed time, minimal run count)
512 # list of stop condition (elapsed time, minimal run count)
513 DEFAULTLIMITS = (
513 DEFAULTLIMITS = (
514 (3.0, 100),
514 (3.0, 100),
515 (10.0, 3),
515 (10.0, 3),
516 )
516 )
517
517
518
518
519 def _timer(
519 def _timer(
520 fm,
520 fm,
521 func,
521 func,
522 setup=None,
522 setup=None,
523 title=None,
523 title=None,
524 displayall=False,
524 displayall=False,
525 limits=DEFAULTLIMITS,
525 limits=DEFAULTLIMITS,
526 prerun=0,
526 prerun=0,
527 profiler=None,
527 profiler=None,
528 ):
528 ):
529 gc.collect()
529 gc.collect()
530 results = []
530 results = []
531 begin = util.timer()
531 begin = util.timer()
532 count = 0
532 count = 0
533 if profiler is None:
533 if profiler is None:
534 profiler = NOOPCTX
534 profiler = NOOPCTX
535 for i in range(prerun):
535 for i in range(prerun):
536 if setup is not None:
536 if setup is not None:
537 setup()
537 setup()
538 func()
538 func()
539 keepgoing = True
539 keepgoing = True
540 while keepgoing:
540 while keepgoing:
541 if setup is not None:
541 if setup is not None:
542 setup()
542 setup()
543 with profiler:
543 with profiler:
544 with timeone() as item:
544 with timeone() as item:
545 r = func()
545 r = func()
546 profiler = NOOPCTX
546 profiler = NOOPCTX
547 count += 1
547 count += 1
548 results.append(item[0])
548 results.append(item[0])
549 cstop = util.timer()
549 cstop = util.timer()
550 # Look for a stop condition.
550 # Look for a stop condition.
551 elapsed = cstop - begin
551 elapsed = cstop - begin
552 for t, mincount in limits:
552 for t, mincount in limits:
553 if elapsed >= t and count >= mincount:
553 if elapsed >= t and count >= mincount:
554 keepgoing = False
554 keepgoing = False
555 break
555 break
556
556
557 formatone(fm, results, title=title, result=r, displayall=displayall)
557 formatone(fm, results, title=title, result=r, displayall=displayall)
558
558
559
559
560 def formatone(fm, timings, title=None, result=None, displayall=False):
560 def formatone(fm, timings, title=None, result=None, displayall=False):
561
561
562 count = len(timings)
562 count = len(timings)
563
563
564 fm.startitem()
564 fm.startitem()
565
565
566 if title:
566 if title:
567 fm.write(b'title', b'! %s\n', title)
567 fm.write(b'title', b'! %s\n', title)
568 if result:
568 if result:
569 fm.write(b'result', b'! result: %s\n', result)
569 fm.write(b'result', b'! result: %s\n', result)
570
570
571 def display(role, entry):
571 def display(role, entry):
572 prefix = b''
572 prefix = b''
573 if role != b'best':
573 if role != b'best':
574 prefix = b'%s.' % role
574 prefix = b'%s.' % role
575 fm.plain(b'!')
575 fm.plain(b'!')
576 fm.write(prefix + b'wall', b' wall %f', entry[0])
576 fm.write(prefix + b'wall', b' wall %f', entry[0])
577 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
577 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
578 fm.write(prefix + b'user', b' user %f', entry[1])
578 fm.write(prefix + b'user', b' user %f', entry[1])
579 fm.write(prefix + b'sys', b' sys %f', entry[2])
579 fm.write(prefix + b'sys', b' sys %f', entry[2])
580 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
580 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
581 fm.plain(b'\n')
581 fm.plain(b'\n')
582
582
583 timings.sort()
583 timings.sort()
584 min_val = timings[0]
584 min_val = timings[0]
585 display(b'best', min_val)
585 display(b'best', min_val)
586 if displayall:
586 if displayall:
587 max_val = timings[-1]
587 max_val = timings[-1]
588 display(b'max', max_val)
588 display(b'max', max_val)
589 avg = tuple([sum(x) / count for x in zip(*timings)])
589 avg = tuple([sum(x) / count for x in zip(*timings)])
590 display(b'avg', avg)
590 display(b'avg', avg)
591 median = timings[len(timings) // 2]
591 median = timings[len(timings) // 2]
592 display(b'median', median)
592 display(b'median', median)
593
593
594
594
595 # utilities for historical portability
595 # utilities for historical portability
596
596
597
597
598 def getint(ui, section, name, default):
598 def getint(ui, section, name, default):
599 # for "historical portability":
599 # for "historical portability":
600 # ui.configint has been available since 1.9 (or fa2b596db182)
600 # ui.configint has been available since 1.9 (or fa2b596db182)
601 v = ui.config(section, name, None)
601 v = ui.config(section, name, None)
602 if v is None:
602 if v is None:
603 return default
603 return default
604 try:
604 try:
605 return int(v)
605 return int(v)
606 except ValueError:
606 except ValueError:
607 raise error.ConfigError(
607 raise error.ConfigError(
608 b"%s.%s is not an integer ('%s')" % (section, name, v)
608 b"%s.%s is not an integer ('%s')" % (section, name, v)
609 )
609 )
610
610
611
611
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can put it back
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
648
648
649
649
650 # utilities to examine each internal API changes
650 # utilities to examine each internal API changes
651
651
652
652
def getbranchmapsubsettable():
    """Locate the ``subsettable`` mapping across Mercurial versions."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
671
671
672
672
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        # older Mercurial exposed the store opener as 'sopener'
        return getattr(repo, 'sopener')
682
682
683
683
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        # older Mercurial exposed the working-dir opener as 'opener'
        return getattr(repo, 'opener')
693
693
694
694
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
723
723
724
724
725 # utilities to clear cache
725 # utilities to clear cache
726
726
727
727
def clearfilecache(obj, attrname):
    """Drop a @filecache-backed property so it is recomputed on next access.

    Works on the unfiltered repository when ``obj`` is a repoview.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # also forget the cache-validation entry, if any
    obj._filecache.pop(attrname, None)
735
735
736
736
def clearchangelog(repo):
    """Invalidate the cached changelog of ``repo`` (and its repoview)."""
    if repo is not repo.unfiltered():
        # repoviews keep their own changelog cache/key; reset both
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
742
742
743
743
744 # perf commands
744 # perf commands
745
745
746
746
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for the given file patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
760
760
761
761
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
769
769
770
770
@command(
    b'perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # force full consumption of the status result
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
807
807
808
808
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture the original value *before* the try block: if it were
    # assigned inside and an earlier statement raised, the finally
    # clause would fail with an unbound name
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
826
826
827
827
def clearcaches(cl):
    """Drop the lookup caches of changelog/revlog ``cl``.

    behave somewhat consistently across internal API changes
    """
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
838
838
839
839
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # start every run from a cold changelog cache
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
855
855
856
856
@command(
    b'perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark the computation of the repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        # always start from a cold tags cache
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
881
881
882
882
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
895
895
896
896
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET against the heads' ancestors"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()
911
911
912
912
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # open a fresh peer connection for every run
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
928
928
929
929
@command(
    b'perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store before each run
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
954
954
955
955
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # re-open and re-parse the bundle header on every run
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1080
1080
1081
1081
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1117
1117
1118
1118
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory map so the next run rebuilds it
        del dirstate._map._dirs

    timer(d)
    fm.end()
1132
1132
1133
1133
@command(
    b'perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # force the dirstate to be loaded before timing starts
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1196
1196
1197
1197
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # force the dirstate and its directory map to be loaded
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory map so each run starts cold
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1213
1213
1214
1214
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the map so setup/run only measure the cache rebuild
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1234
1234
1235
1235
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the map so setup/run only measure the cache rebuild
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # dirfoldmap is derived from `_dirs`; drop both so the whole
        # chain is recomputed on each run
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1256
1256
1257
1257
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # force the dirstate to be loaded before timing begins
    b"a" in ds

    def setup():
        # mark the dirstate dirty so write() actually hits the disk
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1274
1274
1275
1275
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1297
1297
1298
1298
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1330
1330
1331
1331
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1354
1354
1355
1355
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1369
1369
1370
1370
@command(
    b'perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, drop the filecache entry so the phase file is
            # re-read from disk as part of the timed run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1395
1395
1396
1396
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # older hg: fall back to the nodemap for membership tests
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1455
1455
1456
1456
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older hg without getstorage()
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1500
1500
1501
1501
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1514
1514
1515
1515
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop both the dirstate content and the cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # accessing the property triggers the (timed) ignore computation
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1532
1532
1533
1533
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1596
1596
1597
1597
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            # older hg: use the nodemap directly
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1668
1668
1669
1669
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of `hg version` in a subprocess"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # no /dev/null on Windows; also clear HGRCPATH via the environment
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1686
1686
1687
1687
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1713
1713
1714
1714
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changectx"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()
1726
1726
1727
1727
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        # index 3 of the parsed changelog entry is the file list
        len(cl.read(x)[3])

    timer(d)
    fm.end()
1740
1740
1741
1741
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier with `repo.lookup`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1748
1748
1749
1749
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a fixed pseudo-random series of linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed keeps the generated edit list reproducible across runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1787
1787
1788
1788
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving revset specs with `scmutil.revrange`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1796
1796
1797
1797
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node to its revision number in the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def d():
        cl.rev(node)
        # drop the lookup caches so every run does the real work
        clearcaches(cl)

    timer(d)
    fm.end()
1814
1814
1815
1815
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a plain `hg log` invocation (optionally following renames)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # capture the command output so only the log work itself is measured
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
1833
1833
1834
1834
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1851
1851
1852
1852
@command(
    b'perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render to the null device so output cost is constant and nothing is
    # displayed; close the handle when done (the original leaked it)
    devnull = open(os.devnull, 'wb')
    try:
        nullui = ui.copy()
        nullui.fout = devnull
        nullui.disablepager()
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        devnull.close()
1895
1895
1896
1896
def _displaystats(ui, opts, entries, data):
    """display percentile statistics for collected measurements

    `entries` is a list of `(key, title)` pairs; `data[key]` is a list of
    tuples whose first element is the measured value.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # no sample collected for this key: the percentile indexing
            # below would raise IndexError, so just skip it
            continue
        # percentiles must be computed from the number of samples for this
        # key, not from the number of keys in `data` (the original used
        # len(data), which collapsed every percentile to the minimum)
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1941
1941
1942
1942
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, printf-style format looked up in the per-triplet data)
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing, drop the columns that would never be filled
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant to merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # NOTE(review): this dict mixes bytes keys (below) with str keys
            # (added under --timing); the str lookups and %-formatting later
            # rely on that split — confirm intended behaviour on Python 3.
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2124
2124
2125
2125
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # pick header/row format depending on whether timing columns are shown
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits yield interesting base/parent pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # NOTE(review): mixes bytes keys (below) with str keys added
                # under --timing; the %-formatting looks values up with str
                # keys — confirm the intended behaviour on Python 3.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2264
2264
2265
2265
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
2272
2272
2273
2273
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def d():
        store.fncache._load()

    timer(d)
    fm.end()
2285
2285
2286
2286
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # mark dirty so write() actually rewrites the file each run
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        # release the repo lock even if the timed section raises
        # (the original leaked the lock on error)
        lock.release()
    fm.end()
2305
2305
2306
2306
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def d():
        for path in store.fncache.entries:
            store.encode(path)

    timer(d)
    fm.end()
2320
2320
2321
2321
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for the threaded bdiff benchmark

    Pulls `(text1, text2)` pairs from queue `q` until a None sentinel and
    diffs each with the requested algorithm, then waits on `ready` until the
    next batch (or until `done` is set).
    """
    while not done.is_set():
        # consume pairs until the None sentinel ends the batch
        for pair in iter(q.get, None):
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
        q.task_done()  # account for the None sentinel itself
        with ready:
            ready.wait()
2337
2337
2338
2338
def _manifestrevision(repo, mnode):
    """return the raw revision text for manifest node `mnode`"""
    ml = repo.manifestlog
    # newer Mercurial exposes getstorage(); older versions only had _revlog
    if util.safehasattr(ml, b'getstorage'):
        storage = ml.getstorage(b'')
    else:
        storage = ml._revlog
    return storage.revision(mnode)
2348
2348
2349
2349
2350 @command(
2350 @command(
2351 b'perfbdiff',
2351 b'perfbdiff',
2352 revlogopts
2352 revlogopts
2353 + formatteropts
2353 + formatteropts
2354 + [
2354 + [
2355 (
2355 (
2356 b'',
2356 b'',
2357 b'count',
2357 b'count',
2358 1,
2358 1,
2359 b'number of revisions to test (when using --startrev)',
2359 b'number of revisions to test (when using --startrev)',
2360 ),
2360 ),
2361 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2361 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2362 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2362 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2363 (b'', b'blocks', False, b'test computing diffs into blocks'),
2363 (b'', b'blocks', False, b'test computing diffs into blocks'),
2364 (b'', b'xdiff', False, b'use xdiff algorithm'),
2364 (b'', b'xdiff', False, b'use xdiff algorithm'),
2365 ],
2365 ],
2366 b'-c|-m|FILE REV',
2366 b'-c|-m|FILE REV',
2367 )
2367 )
2368 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2368 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2369 """benchmark a bdiff between revisions
2369 """benchmark a bdiff between revisions
2370
2370
2371 By default, benchmark a bdiff between its delta parent and itself.
2371 By default, benchmark a bdiff between its delta parent and itself.
2372
2372
2373 With ``--count``, benchmark bdiffs between delta parents and self for N
2373 With ``--count``, benchmark bdiffs between delta parents and self for N
2374 revisions starting at the specified revision.
2374 revisions starting at the specified revision.
2375
2375
2376 With ``--alldata``, assume the requested revision is a changeset and
2376 With ``--alldata``, assume the requested revision is a changeset and
2377 measure bdiffs for all changes related to that changeset (manifest
2377 measure bdiffs for all changes related to that changeset (manifest
2378 and filelogs).
2378 and filelogs).
2379 """
2379 """
2380 opts = _byteskwargs(opts)
2380 opts = _byteskwargs(opts)
2381
2381
2382 if opts[b'xdiff'] and not opts[b'blocks']:
2382 if opts[b'xdiff'] and not opts[b'blocks']:
2383 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2383 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2384
2384
2385 if opts[b'alldata']:
2385 if opts[b'alldata']:
2386 opts[b'changelog'] = True
2386 opts[b'changelog'] = True
2387
2387
2388 if opts.get(b'changelog') or opts.get(b'manifest'):
2388 if opts.get(b'changelog') or opts.get(b'manifest'):
2389 file_, rev = None, file_
2389 file_, rev = None, file_
2390 elif rev is None:
2390 elif rev is None:
2391 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2391 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2392
2392
2393 blocks = opts[b'blocks']
2393 blocks = opts[b'blocks']
2394 xdiff = opts[b'xdiff']
2394 xdiff = opts[b'xdiff']
2395 textpairs = []
2395 textpairs = []
2396
2396
2397 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2397 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2398
2398
2399 startrev = r.rev(r.lookup(rev))
2399 startrev = r.rev(r.lookup(rev))
2400 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2400 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2401 if opts[b'alldata']:
2401 if opts[b'alldata']:
2402 # Load revisions associated with changeset.
2402 # Load revisions associated with changeset.
2403 ctx = repo[rev]
2403 ctx = repo[rev]
2404 mtext = _manifestrevision(repo, ctx.manifestnode())
2404 mtext = _manifestrevision(repo, ctx.manifestnode())
2405 for pctx in ctx.parents():
2405 for pctx in ctx.parents():
2406 pman = _manifestrevision(repo, pctx.manifestnode())
2406 pman = _manifestrevision(repo, pctx.manifestnode())
2407 textpairs.append((pman, mtext))
2407 textpairs.append((pman, mtext))
2408
2408
2409 # Load filelog revisions by iterating manifest delta.
2409 # Load filelog revisions by iterating manifest delta.
2410 man = ctx.manifest()
2410 man = ctx.manifest()
2411 pman = ctx.p1().manifest()
2411 pman = ctx.p1().manifest()
2412 for filename, change in pman.diff(man).items():
2412 for filename, change in pman.diff(man).items():
2413 fctx = repo.file(filename)
2413 fctx = repo.file(filename)
2414 f1 = fctx.revision(change[0][0] or -1)
2414 f1 = fctx.revision(change[0][0] or -1)
2415 f2 = fctx.revision(change[1][0] or -1)
2415 f2 = fctx.revision(change[1][0] or -1)
2416 textpairs.append((f1, f2))
2416 textpairs.append((f1, f2))
2417 else:
2417 else:
2418 dp = r.deltaparent(rev)
2418 dp = r.deltaparent(rev)
2419 textpairs.append((r.revision(dp), r.revision(rev)))
2419 textpairs.append((r.revision(dp), r.revision(rev)))
2420
2420
2421 withthreads = threads > 0
2421 withthreads = threads > 0
2422 if not withthreads:
2422 if not withthreads:
2423
2423
2424 def d():
2424 def d():
2425 for pair in textpairs:
2425 for pair in textpairs:
2426 if xdiff:
2426 if xdiff:
2427 mdiff.bdiff.xdiffblocks(*pair)
2427 mdiff.bdiff.xdiffblocks(*pair)
2428 elif blocks:
2428 elif blocks:
2429 mdiff.bdiff.blocks(*pair)
2429 mdiff.bdiff.blocks(*pair)
2430 else:
2430 else:
2431 mdiff.textdiff(*pair)
2431 mdiff.textdiff(*pair)
2432
2432
2433 else:
2433 else:
2434 q = queue()
2434 q = queue()
2435 for i in _xrange(threads):
2435 for i in _xrange(threads):
2436 q.put(None)
2436 q.put(None)
2437 ready = threading.Condition()
2437 ready = threading.Condition()
2438 done = threading.Event()
2438 done = threading.Event()
2439 for i in _xrange(threads):
2439 for i in _xrange(threads):
2440 threading.Thread(
2440 threading.Thread(
2441 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2441 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2442 ).start()
2442 ).start()
2443 q.join()
2443 q.join()
2444
2444
2445 def d():
2445 def d():
2446 for pair in textpairs:
2446 for pair in textpairs:
2447 q.put(pair)
2447 q.put(pair)
2448 for i in _xrange(threads):
2448 for i in _xrange(threads):
2449 q.put(None)
2449 q.put(None)
2450 with ready:
2450 with ready:
2451 ready.notify_all()
2451 ready.notify_all()
2452 q.join()
2452 q.join()
2453
2453
2454 timer, fm = gettimer(ui, opts)
2454 timer, fm = gettimer(ui, opts)
2455 timer(d)
2455 timer(d)
2456 fm.end()
2456 fm.end()
2457
2457
2458 if withthreads:
2458 if withthreads:
2459 done.set()
2459 done.set()
2460 for i in _xrange(threads):
2460 for i in _xrange(threads):
2461 q.put(None)
2461 q.put(None)
2462 with ready:
2462 with ready:
2463 ready.notify_all()
2463 ready.notify_all()
2464
2464
2465
2465
2466 @command(
2466 @command(
2467 b'perfunidiff',
2467 b'perfunidiff',
2468 revlogopts
2468 revlogopts
2469 + formatteropts
2469 + formatteropts
2470 + [
2470 + [
2471 (
2471 (
2472 b'',
2472 b'',
2473 b'count',
2473 b'count',
2474 1,
2474 1,
2475 b'number of revisions to test (when using --startrev)',
2475 b'number of revisions to test (when using --startrev)',
2476 ),
2476 ),
2477 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2477 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2478 ],
2478 ],
2479 b'-c|-m|FILE REV',
2479 b'-c|-m|FILE REV',
2480 )
2480 )
2481 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2481 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2482 """benchmark a unified diff between revisions
2482 """benchmark a unified diff between revisions
2483
2483
2484 This doesn't include any copy tracing - it's just a unified diff
2484 This doesn't include any copy tracing - it's just a unified diff
2485 of the texts.
2485 of the texts.
2486
2486
2487 By default, benchmark a diff between its delta parent and itself.
2487 By default, benchmark a diff between its delta parent and itself.
2488
2488
2489 With ``--count``, benchmark diffs between delta parents and self for N
2489 With ``--count``, benchmark diffs between delta parents and self for N
2490 revisions starting at the specified revision.
2490 revisions starting at the specified revision.
2491
2491
2492 With ``--alldata``, assume the requested revision is a changeset and
2492 With ``--alldata``, assume the requested revision is a changeset and
2493 measure diffs for all changes related to that changeset (manifest
2493 measure diffs for all changes related to that changeset (manifest
2494 and filelogs).
2494 and filelogs).
2495 """
2495 """
2496 opts = _byteskwargs(opts)
2496 opts = _byteskwargs(opts)
2497 if opts[b'alldata']:
2497 if opts[b'alldata']:
2498 opts[b'changelog'] = True
2498 opts[b'changelog'] = True
2499
2499
2500 if opts.get(b'changelog') or opts.get(b'manifest'):
2500 if opts.get(b'changelog') or opts.get(b'manifest'):
2501 file_, rev = None, file_
2501 file_, rev = None, file_
2502 elif rev is None:
2502 elif rev is None:
2503 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2503 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2504
2504
2505 textpairs = []
2505 textpairs = []
2506
2506
2507 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2507 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2508
2508
2509 startrev = r.rev(r.lookup(rev))
2509 startrev = r.rev(r.lookup(rev))
2510 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2510 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2511 if opts[b'alldata']:
2511 if opts[b'alldata']:
2512 # Load revisions associated with changeset.
2512 # Load revisions associated with changeset.
2513 ctx = repo[rev]
2513 ctx = repo[rev]
2514 mtext = _manifestrevision(repo, ctx.manifestnode())
2514 mtext = _manifestrevision(repo, ctx.manifestnode())
2515 for pctx in ctx.parents():
2515 for pctx in ctx.parents():
2516 pman = _manifestrevision(repo, pctx.manifestnode())
2516 pman = _manifestrevision(repo, pctx.manifestnode())
2517 textpairs.append((pman, mtext))
2517 textpairs.append((pman, mtext))
2518
2518
2519 # Load filelog revisions by iterating manifest delta.
2519 # Load filelog revisions by iterating manifest delta.
2520 man = ctx.manifest()
2520 man = ctx.manifest()
2521 pman = ctx.p1().manifest()
2521 pman = ctx.p1().manifest()
2522 for filename, change in pman.diff(man).items():
2522 for filename, change in pman.diff(man).items():
2523 fctx = repo.file(filename)
2523 fctx = repo.file(filename)
2524 f1 = fctx.revision(change[0][0] or -1)
2524 f1 = fctx.revision(change[0][0] or -1)
2525 f2 = fctx.revision(change[1][0] or -1)
2525 f2 = fctx.revision(change[1][0] or -1)
2526 textpairs.append((f1, f2))
2526 textpairs.append((f1, f2))
2527 else:
2527 else:
2528 dp = r.deltaparent(rev)
2528 dp = r.deltaparent(rev)
2529 textpairs.append((r.revision(dp), r.revision(rev)))
2529 textpairs.append((r.revision(dp), r.revision(rev)))
2530
2530
2531 def d():
2531 def d():
2532 for left, right in textpairs:
2532 for left, right in textpairs:
2533 # The date strings don't matter, so we pass empty strings.
2533 # The date strings don't matter, so we pass empty strings.
2534 headerlines, hunks = mdiff.unidiff(
2534 headerlines, hunks = mdiff.unidiff(
2535 left, b'', right, b'', b'left', b'right', binary=False
2535 left, b'', right, b'', b'left', b'right', binary=False
2536 )
2536 )
2537 # consume iterators in roughly the way patch.py does
2537 # consume iterators in roughly the way patch.py does
2538 b'\n'.join(headerlines)
2538 b'\n'.join(headerlines)
2539 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2539 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2540
2540
2541 timer, fm = gettimer(ui, opts)
2541 timer, fm = gettimer(ui, opts)
2542 timer(d)
2542 timer(d)
2543 fm.end()
2543 fm.end()
2544
2544
2545
2545
2546 @command(b'perfdiffwd', formatteropts)
2546 @command(b'perfdiffwd', formatteropts)
2547 def perfdiffwd(ui, repo, **opts):
2547 def perfdiffwd(ui, repo, **opts):
2548 """Profile diff of working directory changes"""
2548 """Profile diff of working directory changes"""
2549 opts = _byteskwargs(opts)
2549 opts = _byteskwargs(opts)
2550 timer, fm = gettimer(ui, opts)
2550 timer, fm = gettimer(ui, opts)
2551 options = {
2551 options = {
2552 'w': 'ignore_all_space',
2552 'w': 'ignore_all_space',
2553 'b': 'ignore_space_change',
2553 'b': 'ignore_space_change',
2554 'B': 'ignore_blank_lines',
2554 'B': 'ignore_blank_lines',
2555 }
2555 }
2556
2556
2557 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2557 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2558 opts = {options[c]: b'1' for c in diffopt}
2558 opts = {options[c]: b'1' for c in diffopt}
2559
2559
2560 def d():
2560 def d():
2561 ui.pushbuffer()
2561 ui.pushbuffer()
2562 commands.diff(ui, repo, **opts)
2562 commands.diff(ui, repo, **opts)
2563 ui.popbuffer()
2563 ui.popbuffer()
2564
2564
2565 diffopt = diffopt.encode('ascii')
2565 diffopt = diffopt.encode('ascii')
2566 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2566 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2567 timer(d, title=title)
2567 timer(d, title=title)
2568 fm.end()
2568 fm.end()
2569
2569
2570
2570
2571 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2571 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2572 def perfrevlogindex(ui, repo, file_=None, **opts):
2572 def perfrevlogindex(ui, repo, file_=None, **opts):
2573 """Benchmark operations against a revlog index.
2573 """Benchmark operations against a revlog index.
2574
2574
2575 This tests constructing a revlog instance, reading index data,
2575 This tests constructing a revlog instance, reading index data,
2576 parsing index data, and performing various operations related to
2576 parsing index data, and performing various operations related to
2577 index data.
2577 index data.
2578 """
2578 """
2579
2579
2580 opts = _byteskwargs(opts)
2580 opts = _byteskwargs(opts)
2581
2581
2582 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2582 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2583
2583
2584 opener = getattr(rl, 'opener') # trick linter
2584 opener = getattr(rl, 'opener') # trick linter
2585 indexfile = rl.indexfile
2585 indexfile = rl.indexfile
2586 data = opener.read(indexfile)
2586 data = opener.read(indexfile)
2587
2587
2588 header = struct.unpack(b'>I', data[0:4])[0]
2588 header = struct.unpack(b'>I', data[0:4])[0]
2589 version = header & 0xFFFF
2589 version = header & 0xFFFF
2590 if version == 1:
2590 if version == 1:
2591 revlogio = revlog.revlogio()
2591 revlogio = revlog.revlogio()
2592 inline = header & (1 << 16)
2592 inline = header & (1 << 16)
2593 else:
2593 else:
2594 raise error.Abort(b'unsupported revlog version: %d' % version)
2594 raise error.Abort(b'unsupported revlog version: %d' % version)
2595
2595
2596 rllen = len(rl)
2596 rllen = len(rl)
2597
2597
2598 node0 = rl.node(0)
2598 node0 = rl.node(0)
2599 node25 = rl.node(rllen // 4)
2599 node25 = rl.node(rllen // 4)
2600 node50 = rl.node(rllen // 2)
2600 node50 = rl.node(rllen // 2)
2601 node75 = rl.node(rllen // 4 * 3)
2601 node75 = rl.node(rllen // 4 * 3)
2602 node100 = rl.node(rllen - 1)
2602 node100 = rl.node(rllen - 1)
2603
2603
2604 allrevs = range(rllen)
2604 allrevs = range(rllen)
2605 allrevsrev = list(reversed(allrevs))
2605 allrevsrev = list(reversed(allrevs))
2606 allnodes = [rl.node(rev) for rev in range(rllen)]
2606 allnodes = [rl.node(rev) for rev in range(rllen)]
2607 allnodesrev = list(reversed(allnodes))
2607 allnodesrev = list(reversed(allnodes))
2608
2608
2609 def constructor():
2609 def constructor():
2610 revlog.revlog(opener, indexfile)
2610 revlog.revlog(opener, indexfile)
2611
2611
2612 def read():
2612 def read():
2613 with opener(indexfile) as fh:
2613 with opener(indexfile) as fh:
2614 fh.read()
2614 fh.read()
2615
2615
2616 def parseindex():
2616 def parseindex():
2617 revlogio.parseindex(data, inline)
2617 revlogio.parseindex(data, inline)
2618
2618
2619 def getentry(revornode):
2619 def getentry(revornode):
2620 index = revlogio.parseindex(data, inline)[0]
2620 index = revlogio.parseindex(data, inline)[0]
2621 index[revornode]
2621 index[revornode]
2622
2622
2623 def getentries(revs, count=1):
2623 def getentries(revs, count=1):
2624 index = revlogio.parseindex(data, inline)[0]
2624 index = revlogio.parseindex(data, inline)[0]
2625
2625
2626 for i in range(count):
2626 for i in range(count):
2627 for rev in revs:
2627 for rev in revs:
2628 index[rev]
2628 index[rev]
2629
2629
2630 def resolvenode(node):
2630 def resolvenode(node):
2631 index = revlogio.parseindex(data, inline)[0]
2631 index = revlogio.parseindex(data, inline)[0]
2632 rev = getattr(index, 'rev', None)
2632 rev = getattr(index, 'rev', None)
2633 if rev is None:
2633 if rev is None:
2634 nodemap = getattr(
2634 nodemap = getattr(
2635 revlogio.parseindex(data, inline)[0], 'nodemap', None
2635 revlogio.parseindex(data, inline)[0], 'nodemap', None
2636 )
2636 )
2637 # This only works for the C code.
2637 # This only works for the C code.
2638 if nodemap is None:
2638 if nodemap is None:
2639 return
2639 return
2640 rev = nodemap.__getitem__
2640 rev = nodemap.__getitem__
2641
2641
2642 try:
2642 try:
2643 rev(node)
2643 rev(node)
2644 except error.RevlogError:
2644 except error.RevlogError:
2645 pass
2645 pass
2646
2646
2647 def resolvenodes(nodes, count=1):
2647 def resolvenodes(nodes, count=1):
2648 index = revlogio.parseindex(data, inline)[0]
2648 index = revlogio.parseindex(data, inline)[0]
2649 rev = getattr(index, 'rev', None)
2649 rev = getattr(index, 'rev', None)
2650 if rev is None:
2650 if rev is None:
2651 nodemap = getattr(
2651 nodemap = getattr(
2652 revlogio.parseindex(data, inline)[0], 'nodemap', None
2652 revlogio.parseindex(data, inline)[0], 'nodemap', None
2653 )
2653 )
2654 # This only works for the C code.
2654 # This only works for the C code.
2655 if nodemap is None:
2655 if nodemap is None:
2656 return
2656 return
2657 rev = nodemap.__getitem__
2657 rev = nodemap.__getitem__
2658
2658
2659 for i in range(count):
2659 for i in range(count):
2660 for node in nodes:
2660 for node in nodes:
2661 try:
2661 try:
2662 rev(node)
2662 rev(node)
2663 except error.RevlogError:
2663 except error.RevlogError:
2664 pass
2664 pass
2665
2665
2666 benches = [
2666 benches = [
2667 (constructor, b'revlog constructor'),
2667 (constructor, b'revlog constructor'),
2668 (read, b'read'),
2668 (read, b'read'),
2669 (parseindex, b'create index object'),
2669 (parseindex, b'create index object'),
2670 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2670 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2671 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2671 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2672 (lambda: resolvenode(node0), b'look up node at rev 0'),
2672 (lambda: resolvenode(node0), b'look up node at rev 0'),
2673 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2673 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2674 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2674 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2675 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2675 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2676 (lambda: resolvenode(node100), b'look up node at tip'),
2676 (lambda: resolvenode(node100), b'look up node at tip'),
2677 # 2x variation is to measure caching impact.
2677 # 2x variation is to measure caching impact.
2678 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2678 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2679 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2679 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2680 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2680 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2681 (
2681 (
2682 lambda: resolvenodes(allnodesrev, 2),
2682 lambda: resolvenodes(allnodesrev, 2),
2683 b'look up all nodes 2x (reverse)',
2683 b'look up all nodes 2x (reverse)',
2684 ),
2684 ),
2685 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2685 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2686 (
2686 (
2687 lambda: getentries(allrevs, 2),
2687 lambda: getentries(allrevs, 2),
2688 b'retrieve all index entries 2x (forward)',
2688 b'retrieve all index entries 2x (forward)',
2689 ),
2689 ),
2690 (
2690 (
2691 lambda: getentries(allrevsrev),
2691 lambda: getentries(allrevsrev),
2692 b'retrieve all index entries (reverse)',
2692 b'retrieve all index entries (reverse)',
2693 ),
2693 ),
2694 (
2694 (
2695 lambda: getentries(allrevsrev, 2),
2695 lambda: getentries(allrevsrev, 2),
2696 b'retrieve all index entries 2x (reverse)',
2696 b'retrieve all index entries 2x (reverse)',
2697 ),
2697 ),
2698 ]
2698 ]
2699
2699
2700 for fn, title in benches:
2700 for fn, title in benches:
2701 timer, fm = gettimer(ui, opts)
2701 timer, fm = gettimer(ui, opts)
2702 timer(fn, title=title)
2702 timer(fn, title=title)
2703 fm.end()
2703 fm.end()
2704
2704
2705
2705
2706 @command(
2706 @command(
2707 b'perfrevlogrevisions',
2707 b'perfrevlogrevisions',
2708 revlogopts
2708 revlogopts
2709 + formatteropts
2709 + formatteropts
2710 + [
2710 + [
2711 (b'd', b'dist', 100, b'distance between the revisions'),
2711 (b'd', b'dist', 100, b'distance between the revisions'),
2712 (b's', b'startrev', 0, b'revision to start reading at'),
2712 (b's', b'startrev', 0, b'revision to start reading at'),
2713 (b'', b'reverse', False, b'read in reverse'),
2713 (b'', b'reverse', False, b'read in reverse'),
2714 ],
2714 ],
2715 b'-c|-m|FILE',
2715 b'-c|-m|FILE',
2716 )
2716 )
2717 def perfrevlogrevisions(
2717 def perfrevlogrevisions(
2718 ui, repo, file_=None, startrev=0, reverse=False, **opts
2718 ui, repo, file_=None, startrev=0, reverse=False, **opts
2719 ):
2719 ):
2720 """Benchmark reading a series of revisions from a revlog.
2720 """Benchmark reading a series of revisions from a revlog.
2721
2721
2722 By default, we read every ``-d/--dist`` revision from 0 to tip of
2722 By default, we read every ``-d/--dist`` revision from 0 to tip of
2723 the specified revlog.
2723 the specified revlog.
2724
2724
2725 The start revision can be defined via ``-s/--startrev``.
2725 The start revision can be defined via ``-s/--startrev``.
2726 """
2726 """
2727 opts = _byteskwargs(opts)
2727 opts = _byteskwargs(opts)
2728
2728
2729 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2729 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2730 rllen = getlen(ui)(rl)
2730 rllen = getlen(ui)(rl)
2731
2731
2732 if startrev < 0:
2732 if startrev < 0:
2733 startrev = rllen + startrev
2733 startrev = rllen + startrev
2734
2734
2735 def d():
2735 def d():
2736 rl.clearcaches()
2736 rl.clearcaches()
2737
2737
2738 beginrev = startrev
2738 beginrev = startrev
2739 endrev = rllen
2739 endrev = rllen
2740 dist = opts[b'dist']
2740 dist = opts[b'dist']
2741
2741
2742 if reverse:
2742 if reverse:
2743 beginrev, endrev = endrev - 1, beginrev - 1
2743 beginrev, endrev = endrev - 1, beginrev - 1
2744 dist = -1 * dist
2744 dist = -1 * dist
2745
2745
2746 for x in _xrange(beginrev, endrev, dist):
2746 for x in _xrange(beginrev, endrev, dist):
2747 # Old revisions don't support passing int.
2747 # Old revisions don't support passing int.
2748 n = rl.node(x)
2748 n = rl.node(x)
2749 rl.revision(n)
2749 rl.revision(n)
2750
2750
2751 timer, fm = gettimer(ui, opts)
2751 timer, fm = gettimer(ui, opts)
2752 timer(d)
2752 timer(d)
2753 fm.end()
2753 fm.end()
2754
2754
2755
2755
2756 @command(
2756 @command(
2757 b'perfrevlogwrite',
2757 b'perfrevlogwrite',
2758 revlogopts
2758 revlogopts
2759 + formatteropts
2759 + formatteropts
2760 + [
2760 + [
2761 (b's', b'startrev', 1000, b'revision to start writing at'),
2761 (b's', b'startrev', 1000, b'revision to start writing at'),
2762 (b'', b'stoprev', -1, b'last revision to write'),
2762 (b'', b'stoprev', -1, b'last revision to write'),
2763 (b'', b'count', 3, b'number of passes to perform'),
2763 (b'', b'count', 3, b'number of passes to perform'),
2764 (b'', b'details', False, b'print timing for every revisions tested'),
2764 (b'', b'details', False, b'print timing for every revisions tested'),
2765 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2765 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2766 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2766 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2767 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2767 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2768 ],
2768 ],
2769 b'-c|-m|FILE',
2769 b'-c|-m|FILE',
2770 )
2770 )
2771 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2771 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2772 """Benchmark writing a series of revisions to a revlog.
2772 """Benchmark writing a series of revisions to a revlog.
2773
2773
2774 Possible source values are:
2774 Possible source values are:
2775 * `full`: add from a full text (default).
2775 * `full`: add from a full text (default).
2776 * `parent-1`: add from a delta to the first parent
2776 * `parent-1`: add from a delta to the first parent
2777 * `parent-2`: add from a delta to the second parent if it exists
2777 * `parent-2`: add from a delta to the second parent if it exists
2778 (use a delta from the first parent otherwise)
2778 (use a delta from the first parent otherwise)
2779 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2779 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2780 * `storage`: add from the existing precomputed deltas
2780 * `storage`: add from the existing precomputed deltas
2781
2781
2782 Note: This performance command measures performance in a custom way. As a
2782 Note: This performance command measures performance in a custom way. As a
2783 result some of the global configuration of the 'perf' command does not
2783 result some of the global configuration of the 'perf' command does not
2784 apply to it:
2784 apply to it:
2785
2785
2786 * ``pre-run``: disabled
2786 * ``pre-run``: disabled
2787
2787
2788 * ``profile-benchmark``: disabled
2788 * ``profile-benchmark``: disabled
2789
2789
2790 * ``run-limits``: disabled use --count instead
2790 * ``run-limits``: disabled use --count instead
2791 """
2791 """
2792 opts = _byteskwargs(opts)
2792 opts = _byteskwargs(opts)
2793
2793
2794 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2794 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2795 rllen = getlen(ui)(rl)
2795 rllen = getlen(ui)(rl)
2796 if startrev < 0:
2796 if startrev < 0:
2797 startrev = rllen + startrev
2797 startrev = rllen + startrev
2798 if stoprev < 0:
2798 if stoprev < 0:
2799 stoprev = rllen + stoprev
2799 stoprev = rllen + stoprev
2800
2800
2801 lazydeltabase = opts['lazydeltabase']
2801 lazydeltabase = opts['lazydeltabase']
2802 source = opts['source']
2802 source = opts['source']
2803 clearcaches = opts['clear_caches']
2803 clearcaches = opts['clear_caches']
2804 validsource = (
2804 validsource = (
2805 b'full',
2805 b'full',
2806 b'parent-1',
2806 b'parent-1',
2807 b'parent-2',
2807 b'parent-2',
2808 b'parent-smallest',
2808 b'parent-smallest',
2809 b'storage',
2809 b'storage',
2810 )
2810 )
2811 if source not in validsource:
2811 if source not in validsource:
2812 raise error.Abort('invalid source type: %s' % source)
2812 raise error.Abort('invalid source type: %s' % source)
2813
2813
2814 ### actually gather results
2814 ### actually gather results
2815 count = opts['count']
2815 count = opts['count']
2816 if count <= 0:
2816 if count <= 0:
2817 raise error.Abort('invalide run count: %d' % count)
2817 raise error.Abort('invalide run count: %d' % count)
2818 allresults = []
2818 allresults = []
2819 for c in range(count):
2819 for c in range(count):
2820 timing = _timeonewrite(
2820 timing = _timeonewrite(
2821 ui,
2821 ui,
2822 rl,
2822 rl,
2823 source,
2823 source,
2824 startrev,
2824 startrev,
2825 stoprev,
2825 stoprev,
2826 c + 1,
2826 c + 1,
2827 lazydeltabase=lazydeltabase,
2827 lazydeltabase=lazydeltabase,
2828 clearcaches=clearcaches,
2828 clearcaches=clearcaches,
2829 )
2829 )
2830 allresults.append(timing)
2830 allresults.append(timing)
2831
2831
2832 ### consolidate the results in a single list
2832 ### consolidate the results in a single list
2833 results = []
2833 results = []
2834 for idx, (rev, t) in enumerate(allresults[0]):
2834 for idx, (rev, t) in enumerate(allresults[0]):
2835 ts = [t]
2835 ts = [t]
2836 for other in allresults[1:]:
2836 for other in allresults[1:]:
2837 orev, ot = other[idx]
2837 orev, ot = other[idx]
2838 assert orev == rev
2838 assert orev == rev
2839 ts.append(ot)
2839 ts.append(ot)
2840 results.append((rev, ts))
2840 results.append((rev, ts))
2841 resultcount = len(results)
2841 resultcount = len(results)
2842
2842
2843 ### Compute and display relevant statistics
2843 ### Compute and display relevant statistics
2844
2844
2845 # get a formatter
2845 # get a formatter
2846 fm = ui.formatter(b'perf', opts)
2846 fm = ui.formatter(b'perf', opts)
2847 displayall = ui.configbool(b"perf", b"all-timing", False)
2847 displayall = ui.configbool(b"perf", b"all-timing", False)
2848
2848
2849 # print individual details if requested
2849 # print individual details if requested
2850 if opts['details']:
2850 if opts['details']:
2851 for idx, item in enumerate(results, 1):
2851 for idx, item in enumerate(results, 1):
2852 rev, data = item
2852 rev, data = item
2853 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2853 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2854 formatone(fm, data, title=title, displayall=displayall)
2854 formatone(fm, data, title=title, displayall=displayall)
2855
2855
2856 # sorts results by median time
2856 # sorts results by median time
2857 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2857 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2858 # list of (name, index) to display)
2858 # list of (name, index) to display)
2859 relevants = [
2859 relevants = [
2860 ("min", 0),
2860 ("min", 0),
2861 ("10%", resultcount * 10 // 100),
2861 ("10%", resultcount * 10 // 100),
2862 ("25%", resultcount * 25 // 100),
2862 ("25%", resultcount * 25 // 100),
2863 ("50%", resultcount * 70 // 100),
2863 ("50%", resultcount * 70 // 100),
2864 ("75%", resultcount * 75 // 100),
2864 ("75%", resultcount * 75 // 100),
2865 ("90%", resultcount * 90 // 100),
2865 ("90%", resultcount * 90 // 100),
2866 ("95%", resultcount * 95 // 100),
2866 ("95%", resultcount * 95 // 100),
2867 ("99%", resultcount * 99 // 100),
2867 ("99%", resultcount * 99 // 100),
2868 ("99.9%", resultcount * 999 // 1000),
2868 ("99.9%", resultcount * 999 // 1000),
2869 ("99.99%", resultcount * 9999 // 10000),
2869 ("99.99%", resultcount * 9999 // 10000),
2870 ("99.999%", resultcount * 99999 // 100000),
2870 ("99.999%", resultcount * 99999 // 100000),
2871 ("max", -1),
2871 ("max", -1),
2872 ]
2872 ]
2873 if not ui.quiet:
2873 if not ui.quiet:
2874 for name, idx in relevants:
2874 for name, idx in relevants:
2875 data = results[idx]
2875 data = results[idx]
2876 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2876 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2877 formatone(fm, data[1], title=title, displayall=displayall)
2877 formatone(fm, data[1], title=title, displayall=displayall)
2878
2878
2879 # XXX summing that many float will not be very precise, we ignore this fact
2879 # XXX summing that many float will not be very precise, we ignore this fact
2880 # for now
2880 # for now
2881 totaltime = []
2881 totaltime = []
2882 for item in allresults:
2882 for item in allresults:
2883 totaltime.append(
2883 totaltime.append(
2884 (
2884 (
2885 sum(x[1][0] for x in item),
2885 sum(x[1][0] for x in item),
2886 sum(x[1][1] for x in item),
2886 sum(x[1][1] for x in item),
2887 sum(x[1][2] for x in item),
2887 sum(x[1][2] for x in item),
2888 )
2888 )
2889 )
2889 )
2890 formatone(
2890 formatone(
2891 fm,
2891 fm,
2892 totaltime,
2892 totaltime,
2893 title="total time (%d revs)" % resultcount,
2893 title="total time (%d revs)" % resultcount,
2894 displayall=displayall,
2894 displayall=displayall,
2895 )
2895 )
2896 fm.end()
2896 fm.end()
2897
2897
2898
2898
2899 class _faketr(object):
2899 class _faketr(object):
2900 def add(s, x, y, z=None):
2900 def add(s, x, y, z=None):
2901 return None
2901 return None
2902
2902
2903
2903
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions ``startrev..stoprev`` of ``orig`` into a temporary
    revlog and time each ``addrawrevision`` call.

    ``source`` selects how each revision payload is seeded (see
    ``_getrevisionseed``). ``runidx``, when given, is only used to label
    the progress topic. Returns a list of ``(rev, elapsed)`` pairs.
    """
    timings = []
    # no real transaction is needed; writes are discarded with the tmp dir
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop caches so each addition is measured from a cold state
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            # r[0] holds the elapsed time recorded by the timeone() manager
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2953
2953
2954
2954
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for ``addrawrevision`` of ``rev``.

    ``source`` selects how the revision payload is supplied: ``b'full'``
    provides the full text, while the other modes provide a cached delta
    against a chosen base (first parent, second parent, whichever parent
    yields the smaller delta, or the delta base actually used in storage).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        # full text, no delta hint
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # prefer the smaller of the two parent deltas (ties go to p1)
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2995
2995
2996
2996
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary on-disk copy of revlog ``orig`` truncated so that
    revisions from ``truncaterev`` onward are missing and can be re-added.

    The copy lives in a throwaway temp directory that is removed on exit.
    Inline revlogs are not supported (index and data must be separate files).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward newer constructor arguments only when the source revlog has them
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size (orig._io.size bytes)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3047
3047
3048
3048
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every engine that is available and actually implements
        # revlog compression (probed with a dummy compress call)
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # return a fresh file handle on the file holding the chunk data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # mutable cell so dochunkbatch can hand its result to docompress
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3176
3176
3177
3177
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # carve the raw (still compressed) chunk of each rev out of the
        # already-read segments, without copying (util.buffer)
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): the `chain` parameter is unused; the closure reads
        # `slicedchain` from the enclosing scope instead — confirm intended.
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older Mercurial kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # pre-compute the inputs of each phase so benchmarks measure one phase only
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3319
3319
3320
3320
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of building the volatile
    revision set caches on revset execution. Volatile caches hold filtered
    and obsolescence related data."""
    # NOTE: the help text previously referred to a nonexistent ``--clean``
    # option; the flag registered above is ``-C/--clear``.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # force full changectx construction for each matched revision
            for ctx in repo.set(expr):
                pass
        else:
            # only iterate the revision numbers
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3352
3352
3353
3353
@command(
    b'perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(func, name):
        """wrap ``func(repo, name)`` so each run starts from cleared
        volatile sets (and, optionally, a dropped obsstore)."""

        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            func(repo, name)

        return d

    # obsolescence-related sets first, restricted to requested names if any
    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(makebench(obsolete.getrevs, name), title=name)

    # then the filtered-revision sets
    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
3401
3401
3402
3402
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the subset build time
                # is included in the measurement
                view._branchcaches.clear()
            else:
                # only drop the cache entry for this filter level
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchcache reads/writes so only in-memory
    # computation is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            # printname is already bytes; pass it through unconverted
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3492
3492
3493
3493
3494 @command(
3494 @command(
3495 b'perfbranchmapupdate',
3495 b'perfbranchmapupdate',
3496 [
3496 [
3497 (b'', b'base', [], b'subset of revision to start from'),
3497 (b'', b'base', [], b'subset of revision to start from'),
3498 (b'', b'target', [], b'subset of revision to end with'),
3498 (b'', b'target', [], b'subset of revision to end with'),
3499 (b'', b'clear-caches', False, b'clear cache between each runs'),
3499 (b'', b'clear-caches', False, b'clear cache between each runs'),
3500 ]
3500 ]
3501 + formatteropts,
3501 + formatteropts,
3502 )
3502 )
3503 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3503 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3504 """benchmark branchmap update from for <base> revs to <target> revs
3504 """benchmark branchmap update from for <base> revs to <target> revs
3505
3505
3506 If `--clear-caches` is passed, the following items will be reset before
3506 If `--clear-caches` is passed, the following items will be reset before
3507 each update:
3507 each update:
3508 * the changelog instance and associated indexes
3508 * the changelog instance and associated indexes
3509 * the rev-branch-cache instance
3509 * the rev-branch-cache instance
3510
3510
3511 Examples:
3511 Examples:
3512
3512
3513 # update for the one last revision
3513 # update for the one last revision
3514 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3514 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3515
3515
3516 $ update for change coming with a new branch
3516 $ update for change coming with a new branch
3517 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3517 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3518 """
3518 """
3519 from mercurial import branchmap
3519 from mercurial import branchmap
3520 from mercurial import repoview
3520 from mercurial import repoview
3521
3521
3522 opts = _byteskwargs(opts)
3522 opts = _byteskwargs(opts)
3523 timer, fm = gettimer(ui, opts)
3523 timer, fm = gettimer(ui, opts)
3524 clearcaches = opts[b'clear_caches']
3524 clearcaches = opts[b'clear_caches']
3525 unfi = repo.unfiltered()
3525 unfi = repo.unfiltered()
3526 x = [None] # used to pass data between closure
3526 x = [None] # used to pass data between closure
3527
3527
3528 # we use a `list` here to avoid possible side effect from smartset
3528 # we use a `list` here to avoid possible side effect from smartset
3529 baserevs = list(scmutil.revrange(repo, base))
3529 baserevs = list(scmutil.revrange(repo, base))
3530 targetrevs = list(scmutil.revrange(repo, target))
3530 targetrevs = list(scmutil.revrange(repo, target))
3531 if not baserevs:
3531 if not baserevs:
3532 raise error.Abort(b'no revisions selected for --base')
3532 raise error.Abort(b'no revisions selected for --base')
3533 if not targetrevs:
3533 if not targetrevs:
3534 raise error.Abort(b'no revisions selected for --target')
3534 raise error.Abort(b'no revisions selected for --target')
3535
3535
3536 # make sure the target branchmap also contains the one in the base
3536 # make sure the target branchmap also contains the one in the base
3537 targetrevs = list(set(baserevs) | set(targetrevs))
3537 targetrevs = list(set(baserevs) | set(targetrevs))
3538 targetrevs.sort()
3538 targetrevs.sort()
3539
3539
3540 cl = repo.changelog
3540 cl = repo.changelog
3541 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3541 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3542 allbaserevs.sort()
3542 allbaserevs.sort()
3543 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3543 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3544
3544
3545 newrevs = list(alltargetrevs.difference(allbaserevs))
3545 newrevs = list(alltargetrevs.difference(allbaserevs))
3546 newrevs.sort()
3546 newrevs.sort()
3547
3547
3548 allrevs = frozenset(unfi.changelog.revs())
3548 allrevs = frozenset(unfi.changelog.revs())
3549 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3549 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3550 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3550 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3551
3551
3552 def basefilter(repo, visibilityexceptions=None):
3552 def basefilter(repo, visibilityexceptions=None):
3553 return basefilterrevs
3553 return basefilterrevs
3554
3554
3555 def targetfilter(repo, visibilityexceptions=None):
3555 def targetfilter(repo, visibilityexceptions=None):
3556 return targetfilterrevs
3556 return targetfilterrevs
3557
3557
3558 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3558 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3559 ui.status(msg % (len(allbaserevs), len(newrevs)))
3559 ui.status(msg % (len(allbaserevs), len(newrevs)))
3560 if targetfilterrevs:
3560 if targetfilterrevs:
3561 msg = b'(%d revisions still filtered)\n'
3561 msg = b'(%d revisions still filtered)\n'
3562 ui.status(msg % len(targetfilterrevs))
3562 ui.status(msg % len(targetfilterrevs))
3563
3563
3564 try:
3564 try:
3565 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3565 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3566 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3566 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3567
3567
3568 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3568 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3569 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3569 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3570
3570
3571 # try to find an existing branchmap to reuse
3571 # try to find an existing branchmap to reuse
3572 subsettable = getbranchmapsubsettable()
3572 subsettable = getbranchmapsubsettable()
3573 candidatefilter = subsettable.get(None)
3573 candidatefilter = subsettable.get(None)
3574 while candidatefilter is not None:
3574 while candidatefilter is not None:
3575 candidatebm = repo.filtered(candidatefilter).branchmap()
3575 candidatebm = repo.filtered(candidatefilter).branchmap()
3576 if candidatebm.validfor(baserepo):
3576 if candidatebm.validfor(baserepo):
3577 filtered = repoview.filterrevs(repo, candidatefilter)
3577 filtered = repoview.filterrevs(repo, candidatefilter)
3578 missing = [r for r in allbaserevs if r in filtered]
3578 missing = [r for r in allbaserevs if r in filtered]
3579 base = candidatebm.copy()
3579 base = candidatebm.copy()
3580 base.update(baserepo, missing)
3580 base.update(baserepo, missing)
3581 break
3581 break
3582 candidatefilter = subsettable.get(candidatefilter)
3582 candidatefilter = subsettable.get(candidatefilter)
3583 else:
3583 else:
3584 # no suitable subset where found
3584 # no suitable subset where found
3585 base = branchmap.branchcache()
3585 base = branchmap.branchcache()
3586 base.update(baserepo, allbaserevs)
3586 base.update(baserepo, allbaserevs)
3587
3587
3588 def setup():
3588 def setup():
3589 x[0] = base.copy()
3589 x[0] = base.copy()
3590 if clearcaches:
3590 if clearcaches:
3591 unfi._revbranchcache = None
3591 unfi._revbranchcache = None
3592 clearchangelog(repo)
3592 clearchangelog(repo)
3593
3593
3594 def bench():
3594 def bench():
3595 x[0].update(targetrepo, newrevs)
3595 x[0].update(targetrepo, newrevs)
3596
3596
3597 timer(bench, setup=setup)
3597 timer(bench, setup=setup)
3598 fm.end()
3598 fm.end()
3599 finally:
3599 finally:
3600 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3600 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3601 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3601 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3602
3602
3603
3603
3604 @command(
3604 @command(
3605 b'perfbranchmapload',
3605 b'perfbranchmapload',
3606 [
3606 [
3607 (b'f', b'filter', b'', b'Specify repoview filter'),
3607 (b'f', b'filter', b'', b'Specify repoview filter'),
3608 (b'', b'list', False, b'List brachmap filter caches'),
3608 (b'', b'list', False, b'List brachmap filter caches'),
3609 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3609 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3610 ]
3610 ]
3611 + formatteropts,
3611 + formatteropts,
3612 )
3612 )
3613 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3613 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3614 """benchmark reading the branchmap"""
3614 """benchmark reading the branchmap"""
3615 opts = _byteskwargs(opts)
3615 opts = _byteskwargs(opts)
3616 clearrevlogs = opts[b'clear_revlogs']
3616 clearrevlogs = opts[b'clear_revlogs']
3617
3617
3618 if list:
3618 if list:
3619 for name, kind, st in repo.cachevfs.readdir(stat=True):
3619 for name, kind, st in repo.cachevfs.readdir(stat=True):
3620 if name.startswith(b'branch2'):
3620 if name.startswith(b'branch2'):
3621 filtername = name.partition(b'-')[2] or b'unfiltered'
3621 filtername = name.partition(b'-')[2] or b'unfiltered'
3622 ui.status(
3622 ui.status(
3623 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3623 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3624 )
3624 )
3625 return
3625 return
3626 if not filter:
3626 if not filter:
3627 filter = None
3627 filter = None
3628 subsettable = getbranchmapsubsettable()
3628 subsettable = getbranchmapsubsettable()
3629 if filter is None:
3629 if filter is None:
3630 repo = repo.unfiltered()
3630 repo = repo.unfiltered()
3631 else:
3631 else:
3632 repo = repoview.repoview(repo, filter)
3632 repo = repoview.repoview(repo, filter)
3633
3633
3634 repo.branchmap() # make sure we have a relevant, up to date branchmap
3634 repo.branchmap() # make sure we have a relevant, up to date branchmap
3635
3635
3636 try:
3636 try:
3637 fromfile = branchmap.branchcache.fromfile
3637 fromfile = branchmap.branchcache.fromfile
3638 except AttributeError:
3638 except AttributeError:
3639 # older versions
3639 # older versions
3640 fromfile = branchmap.read
3640 fromfile = branchmap.read
3641
3641
3642 currentfilter = filter
3642 currentfilter = filter
3643 # try once without timer, the filter may not be cached
3643 # try once without timer, the filter may not be cached
3644 while fromfile(repo) is None:
3644 while fromfile(repo) is None:
3645 currentfilter = subsettable.get(currentfilter)
3645 currentfilter = subsettable.get(currentfilter)
3646 if currentfilter is None:
3646 if currentfilter is None:
3647 raise error.Abort(
3647 raise error.Abort(
3648 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3648 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3649 )
3649 )
3650 repo = repo.filtered(currentfilter)
3650 repo = repo.filtered(currentfilter)
3651 timer, fm = gettimer(ui, opts)
3651 timer, fm = gettimer(ui, opts)
3652
3652
3653 def setup():
3653 def setup():
3654 if clearrevlogs:
3654 if clearrevlogs:
3655 clearchangelog(repo)
3655 clearchangelog(repo)
3656
3656
3657 def bench():
3657 def bench():
3658 fromfile(repo)
3658 fromfile(repo)
3659
3659
3660 timer(bench, setup=setup)
3660 timer(bench, setup=setup)
3661 fm.end()
3661 fm.end()
3662
3662
3663
3663
3664 @command(b'perfloadmarkers')
3664 @command(b'perfloadmarkers')
3665 def perfloadmarkers(ui, repo):
3665 def perfloadmarkers(ui, repo):
3666 """benchmark the time to parse the on-disk markers for a repo
3666 """benchmark the time to parse the on-disk markers for a repo
3667
3667
3668 Result is the number of markers in the repo."""
3668 Result is the number of markers in the repo."""
3669 timer, fm = gettimer(ui)
3669 timer, fm = gettimer(ui)
3670 svfs = getsvfs(repo)
3670 svfs = getsvfs(repo)
3671 timer(lambda: len(obsolete.obsstore(svfs)))
3671 timer(lambda: len(obsolete.obsstore(svfs)))
3672 fm.end()
3672 fm.end()
3673
3673
3674
3674
3675 @command(
3675 @command(
3676 b'perflrucachedict',
3676 b'perflrucachedict',
3677 formatteropts
3677 formatteropts
3678 + [
3678 + [
3679 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3679 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3680 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3680 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3681 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3681 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3682 (b'', b'size', 4, b'size of cache'),
3682 (b'', b'size', 4, b'size of cache'),
3683 (b'', b'gets', 10000, b'number of key lookups'),
3683 (b'', b'gets', 10000, b'number of key lookups'),
3684 (b'', b'sets', 10000, b'number of key sets'),
3684 (b'', b'sets', 10000, b'number of key sets'),
3685 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3685 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3686 (
3686 (
3687 b'',
3687 b'',
3688 b'mixedgetfreq',
3688 b'mixedgetfreq',
3689 50,
3689 50,
3690 b'frequency of get vs set ops in mixed mode',
3690 b'frequency of get vs set ops in mixed mode',
3691 ),
3691 ),
3692 ],
3692 ],
3693 norepo=True,
3693 norepo=True,
3694 )
3694 )
3695 def perflrucache(
3695 def perflrucache(
3696 ui,
3696 ui,
3697 mincost=0,
3697 mincost=0,
3698 maxcost=100,
3698 maxcost=100,
3699 costlimit=0,
3699 costlimit=0,
3700 size=4,
3700 size=4,
3701 gets=10000,
3701 gets=10000,
3702 sets=10000,
3702 sets=10000,
3703 mixed=10000,
3703 mixed=10000,
3704 mixedgetfreq=50,
3704 mixedgetfreq=50,
3705 **opts
3705 **opts
3706 ):
3706 ):
3707 opts = _byteskwargs(opts)
3707 opts = _byteskwargs(opts)
3708
3708
3709 def doinit():
3709 def doinit():
3710 for i in _xrange(10000):
3710 for i in _xrange(10000):
3711 util.lrucachedict(size)
3711 util.lrucachedict(size)
3712
3712
3713 costrange = list(range(mincost, maxcost + 1))
3713 costrange = list(range(mincost, maxcost + 1))
3714
3714
3715 values = []
3715 values = []
3716 for i in _xrange(size):
3716 for i in _xrange(size):
3717 values.append(random.randint(0, _maxint))
3717 values.append(random.randint(0, _maxint))
3718
3718
3719 # Get mode fills the cache and tests raw lookup performance with no
3719 # Get mode fills the cache and tests raw lookup performance with no
3720 # eviction.
3720 # eviction.
3721 getseq = []
3721 getseq = []
3722 for i in _xrange(gets):
3722 for i in _xrange(gets):
3723 getseq.append(random.choice(values))
3723 getseq.append(random.choice(values))
3724
3724
3725 def dogets():
3725 def dogets():
3726 d = util.lrucachedict(size)
3726 d = util.lrucachedict(size)
3727 for v in values:
3727 for v in values:
3728 d[v] = v
3728 d[v] = v
3729 for key in getseq:
3729 for key in getseq:
3730 value = d[key]
3730 value = d[key]
3731 value # silence pyflakes warning
3731 value # silence pyflakes warning
3732
3732
3733 def dogetscost():
3733 def dogetscost():
3734 d = util.lrucachedict(size, maxcost=costlimit)
3734 d = util.lrucachedict(size, maxcost=costlimit)
3735 for i, v in enumerate(values):
3735 for i, v in enumerate(values):
3736 d.insert(v, v, cost=costs[i])
3736 d.insert(v, v, cost=costs[i])
3737 for key in getseq:
3737 for key in getseq:
3738 try:
3738 try:
3739 value = d[key]
3739 value = d[key]
3740 value # silence pyflakes warning
3740 value # silence pyflakes warning
3741 except KeyError:
3741 except KeyError:
3742 pass
3742 pass
3743
3743
3744 # Set mode tests insertion speed with cache eviction.
3744 # Set mode tests insertion speed with cache eviction.
3745 setseq = []
3745 setseq = []
3746 costs = []
3746 costs = []
3747 for i in _xrange(sets):
3747 for i in _xrange(sets):
3748 setseq.append(random.randint(0, _maxint))
3748 setseq.append(random.randint(0, _maxint))
3749 costs.append(random.choice(costrange))
3749 costs.append(random.choice(costrange))
3750
3750
3751 def doinserts():
3751 def doinserts():
3752 d = util.lrucachedict(size)
3752 d = util.lrucachedict(size)
3753 for v in setseq:
3753 for v in setseq:
3754 d.insert(v, v)
3754 d.insert(v, v)
3755
3755
3756 def doinsertscost():
3756 def doinsertscost():
3757 d = util.lrucachedict(size, maxcost=costlimit)
3757 d = util.lrucachedict(size, maxcost=costlimit)
3758 for i, v in enumerate(setseq):
3758 for i, v in enumerate(setseq):
3759 d.insert(v, v, cost=costs[i])
3759 d.insert(v, v, cost=costs[i])
3760
3760
3761 def dosets():
3761 def dosets():
3762 d = util.lrucachedict(size)
3762 d = util.lrucachedict(size)
3763 for v in setseq:
3763 for v in setseq:
3764 d[v] = v
3764 d[v] = v
3765
3765
3766 # Mixed mode randomly performs gets and sets with eviction.
3766 # Mixed mode randomly performs gets and sets with eviction.
3767 mixedops = []
3767 mixedops = []
3768 for i in _xrange(mixed):
3768 for i in _xrange(mixed):
3769 r = random.randint(0, 100)
3769 r = random.randint(0, 100)
3770 if r < mixedgetfreq:
3770 if r < mixedgetfreq:
3771 op = 0
3771 op = 0
3772 else:
3772 else:
3773 op = 1
3773 op = 1
3774
3774
3775 mixedops.append(
3775 mixedops.append(
3776 (op, random.randint(0, size * 2), random.choice(costrange))
3776 (op, random.randint(0, size * 2), random.choice(costrange))
3777 )
3777 )
3778
3778
3779 def domixed():
3779 def domixed():
3780 d = util.lrucachedict(size)
3780 d = util.lrucachedict(size)
3781
3781
3782 for op, v, cost in mixedops:
3782 for op, v, cost in mixedops:
3783 if op == 0:
3783 if op == 0:
3784 try:
3784 try:
3785 d[v]
3785 d[v]
3786 except KeyError:
3786 except KeyError:
3787 pass
3787 pass
3788 else:
3788 else:
3789 d[v] = v
3789 d[v] = v
3790
3790
3791 def domixedcost():
3791 def domixedcost():
3792 d = util.lrucachedict(size, maxcost=costlimit)
3792 d = util.lrucachedict(size, maxcost=costlimit)
3793
3793
3794 for op, v, cost in mixedops:
3794 for op, v, cost in mixedops:
3795 if op == 0:
3795 if op == 0:
3796 try:
3796 try:
3797 d[v]
3797 d[v]
3798 except KeyError:
3798 except KeyError:
3799 pass
3799 pass
3800 else:
3800 else:
3801 d.insert(v, v, cost=cost)
3801 d.insert(v, v, cost=cost)
3802
3802
3803 benches = [
3803 benches = [
3804 (doinit, b'init'),
3804 (doinit, b'init'),
3805 ]
3805 ]
3806
3806
3807 if costlimit:
3807 if costlimit:
3808 benches.extend(
3808 benches.extend(
3809 [
3809 [
3810 (dogetscost, b'gets w/ cost limit'),
3810 (dogetscost, b'gets w/ cost limit'),
3811 (doinsertscost, b'inserts w/ cost limit'),
3811 (doinsertscost, b'inserts w/ cost limit'),
3812 (domixedcost, b'mixed w/ cost limit'),
3812 (domixedcost, b'mixed w/ cost limit'),
3813 ]
3813 ]
3814 )
3814 )
3815 else:
3815 else:
3816 benches.extend(
3816 benches.extend(
3817 [
3817 [
3818 (dogets, b'gets'),
3818 (dogets, b'gets'),
3819 (doinserts, b'inserts'),
3819 (doinserts, b'inserts'),
3820 (dosets, b'sets'),
3820 (dosets, b'sets'),
3821 (domixed, b'mixed'),
3821 (domixed, b'mixed'),
3822 ]
3822 ]
3823 )
3823 )
3824
3824
3825 for fn, title in benches:
3825 for fn, title in benches:
3826 timer, fm = gettimer(ui, opts)
3826 timer, fm = gettimer(ui, opts)
3827 timer(fn, title=title)
3827 timer(fn, title=title)
3828 fm.end()
3828 fm.end()
3829
3829
3830
3830
3831 @command(
3831 @command(
3832 b'perfwrite',
3832 b'perfwrite',
3833 formatteropts
3833 formatteropts
3834 + [
3834 + [
3835 (b'', b'write-method', b'write', b'ui write method'),
3835 (b'', b'write-method', b'write', b'ui write method'),
3836 (b'', b'nlines', 100, b'number of lines'),
3836 (b'', b'nlines', 100, b'number of lines'),
3837 (b'', b'nitems', 100, b'number of items (per line)'),
3837 (b'', b'nitems', 100, b'number of items (per line)'),
3838 (b'', b'item', b'x', b'item that is written'),
3838 (b'', b'item', b'x', b'item that is written'),
3839 (b'', b'batch-line', None, b'pass whole line to write method at once'),
3839 (b'', b'batch-line', None, b'pass whole line to write method at once'),
3840 (b'', b'flush-line', None, b'flush after each line'),
3840 (b'', b'flush-line', None, b'flush after each line'),
3841 ],
3841 ],
3842 )
3842 )
3843 def perfwrite(ui, repo, **opts):
3843 def perfwrite(ui, repo, **opts):
3844 """microbenchmark ui.write (and others)"""
3844 """microbenchmark ui.write (and others)"""
3845 opts = _byteskwargs(opts)
3845 opts = _byteskwargs(opts)
3846
3846
3847 write = getattr(ui, _sysstr(opts[b'write_method']))
3847 write = getattr(ui, _sysstr(opts[b'write_method']))
3848 nlines = int(opts[b'nlines'])
3848 nlines = int(opts[b'nlines'])
3849 nitems = int(opts[b'nitems'])
3849 nitems = int(opts[b'nitems'])
3850 item = opts[b'item']
3850 item = opts[b'item']
3851 batch_line = opts.get(b'batch_line')
3851 batch_line = opts.get(b'batch_line')
3852 flush_line = opts.get(b'flush_line')
3852 flush_line = opts.get(b'flush_line')
3853
3853
3854 if batch_line:
3854 if batch_line:
3855 line = item * nitems + b'\n'
3855 line = item * nitems + b'\n'
3856
3856
3857 def benchmark():
3857 def benchmark():
3858 for i in pycompat.xrange(nlines):
3858 for i in pycompat.xrange(nlines):
3859 if batch_line:
3859 if batch_line:
3860 write(line)
3860 write(line)
3861 else:
3861 else:
3862 for i in pycompat.xrange(nitems):
3862 for i in pycompat.xrange(nitems):
3863 write(item)
3863 write(item)
3864 write(b'\n')
3864 write(b'\n')
3865 if flush_line:
3865 if flush_line:
3866 ui.flush()
3866 ui.flush()
3867 ui.flush()
3867 ui.flush()
3868
3868
3869 timer, fm = gettimer(ui, opts)
3869 timer, fm = gettimer(ui, opts)
3870 timer(benchmark)
3870 timer(benchmark)
3871 fm.end()
3871 fm.end()
3872
3872
3873
3873
3874 def uisetup(ui):
3874 def uisetup(ui):
3875 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3875 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3876 commands, b'debugrevlogopts'
3876 commands, b'debugrevlogopts'
3877 ):
3877 ):
3878 # for "historical portability":
3878 # for "historical portability":
3879 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3879 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3880 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3880 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3881 # openrevlog() should cause failure, because it has been
3881 # openrevlog() should cause failure, because it has been
3882 # available since 3.5 (or 49c583ca48c4).
3882 # available since 3.5 (or 49c583ca48c4).
3883 def openrevlog(orig, repo, cmd, file_, opts):
3883 def openrevlog(orig, repo, cmd, file_, opts):
3884 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3884 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3885 raise error.Abort(
3885 raise error.Abort(
3886 b"This version doesn't support --dir option",
3886 b"This version doesn't support --dir option",
3887 hint=b"use 3.5 or later",
3887 hint=b"use 3.5 or later",
3888 )
3888 )
3889 return orig(repo, cmd, file_, opts)
3889 return orig(repo, cmd, file_, opts)
3890
3890
3891 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3891 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3892
3892
3893
3893
3894 @command(
3894 @command(
3895 b'perfprogress',
3895 b'perfprogress',
3896 formatteropts
3896 formatteropts
3897 + [
3897 + [
3898 (b'', b'topic', b'topic', b'topic for progress messages'),
3898 (b'', b'topic', b'topic', b'topic for progress messages'),
3899 (b'c', b'total', 1000000, b'total value we are progressing to'),
3899 (b'c', b'total', 1000000, b'total value we are progressing to'),
3900 ],
3900 ],
3901 norepo=True,
3901 norepo=True,
3902 )
3902 )
3903 def perfprogress(ui, topic=None, total=None, **opts):
3903 def perfprogress(ui, topic=None, total=None, **opts):
3904 """printing of progress bars"""
3904 """printing of progress bars"""
3905 opts = _byteskwargs(opts)
3905 opts = _byteskwargs(opts)
3906
3906
3907 timer, fm = gettimer(ui, opts)
3907 timer, fm = gettimer(ui, opts)
3908
3908
3909 def doprogress():
3909 def doprogress():
3910 with ui.makeprogress(topic, total=total) as progress:
3910 with ui.makeprogress(topic, total=total) as progress:
3911 for i in _xrange(total):
3911 for i in _xrange(total):
3912 progress.increment()
3912 progress.increment()
3913
3913
3914 timer(doprogress)
3914 timer(doprogress)
3915 fm.end()
3915 fm.end()
General Comments 0
You need to be logged in to leave comments. Login now