##// END OF EJS Templates
perf: use `setup` function in `perfdirstate`...
marmoute -
r43392:436a6a31 default
parent child Browse files
Show More
@@ -1,3756 +1,3758 b''
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median average. If not set only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If benchmark has been running for <time> seconds, and we have performed
      <numberofrun> iterations, stop the benchmark,

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return *a* unchanged.

    Fallback used when a pycompat conversion helper is unavailable on
    old Mercurial versions.
    """
    return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
# unique sentinel so a missing attribute is distinguishable from one
# whose value is None (or any other real object)
_undefined = object()


def safehasattr(thing, attr):
    """Return True if `thing` has attribute `attr`.

    `attr` may be bytes; it is converted with _sysstr before lookup.
    """
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
182
182
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # monotonic high-resolution clock (Python >= 3.3)
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): on py3 os.name is a str, so b'nt' never matches there;
    # presumably harmless because py3 always takes the perf_counter branch
    # above — confirm
    util.timer = time.clock
else:
    util.timer = time.time
192
192
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# command table populated by the @command decorator defined below
cmdtable = {}
222
222
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its list of names."""
    aliases = cmd.split(b"|")
    return aliases
228
228
229
229
# Pick the most capable @command decorator this Mercurial version offers,
# falling back progressively for "historical portability".
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                # emulate norepo by appending aliases to commands.norepo
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
# Register the extension's config items so hg recognizes them; wrapped in
# try/except for "historical portability" with versions lacking registrar
# or configitems.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    # registration machinery unavailable on this version; config reads
    # below simply use their explicit fallback defaults
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # NOTE(review): presumably configitem() rejects the experimental=
    # keyword here, so everything is re-registered without it — confirm
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335
335
336
336
337 def getlen(ui):
337 def getlen(ui):
338 if ui.configbool(b"perf", b"stub", False):
338 if ui.configbool(b"perf", b"stub", False):
339 return lambda x: 1
339 return lambda x: 1
340 return len
340 return len
341
341
342
342
class noop(object):
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        # returning a falsy value: exceptions are never suppressed
        return None


# shared do-nothing instance, used when profiling is disabled
NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # formatter is always "plain" (falsy), never structured
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<runcount>'; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """Degenerate timer: run ``setup`` (if any) then ``func`` exactly once.

    Used instead of _timer when perf.stub is set; nothing is measured or
    reported, ``fm`` and ``title`` are accepted only for interface parity.
    """
    if setup is not None:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Time one code block.

    Yields a list which, after the block exits, holds a single
    (wallclock, user-cpu, system-cpu) delta tuple.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times(): index 0 is user time, index 1 is system time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
497
497
498
498
# list of stop condition (elapsed time, minimal run count)
# Each pair means: once the benchmark has been running for at least
# <seconds> AND completed at least <count> iterations, stop. The pairs
# are checked in order (see _timer).
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run ``func`` (preceded each time by ``setup`` when given)
    until one of the (elapsed, count) conditions in ``limits`` is met,
    then report the collected timings through formatter ``fm``.

    ``prerun`` unmeasured warm-up iterations are executed first. If a
    ``profiler`` context manager is supplied, only the first measured
    iteration runs under it.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up runs: executed but never timed or recorded
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the last iteration's return value, shown as the benchmark result
    formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
547 def formatone(fm, timings, title=None, result=None, displayall=False):
547 def formatone(fm, timings, title=None, result=None, displayall=False):
548
548
549 count = len(timings)
549 count = len(timings)
550
550
551 fm.startitem()
551 fm.startitem()
552
552
553 if title:
553 if title:
554 fm.write(b'title', b'! %s\n', title)
554 fm.write(b'title', b'! %s\n', title)
555 if result:
555 if result:
556 fm.write(b'result', b'! result: %s\n', result)
556 fm.write(b'result', b'! result: %s\n', result)
557
557
558 def display(role, entry):
558 def display(role, entry):
559 prefix = b''
559 prefix = b''
560 if role != b'best':
560 if role != b'best':
561 prefix = b'%s.' % role
561 prefix = b'%s.' % role
562 fm.plain(b'!')
562 fm.plain(b'!')
563 fm.write(prefix + b'wall', b' wall %f', entry[0])
563 fm.write(prefix + b'wall', b' wall %f', entry[0])
564 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
564 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
565 fm.write(prefix + b'user', b' user %f', entry[1])
565 fm.write(prefix + b'user', b' user %f', entry[1])
566 fm.write(prefix + b'sys', b' sys %f', entry[2])
566 fm.write(prefix + b'sys', b' sys %f', entry[2])
567 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
567 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
568 fm.plain(b'\n')
568 fm.plain(b'\n')
569
569
570 timings.sort()
570 timings.sort()
571 min_val = timings[0]
571 min_val = timings[0]
572 display(b'best', min_val)
572 display(b'best', min_val)
573 if displayall:
573 if displayall:
574 max_val = timings[-1]
574 max_val = timings[-1]
575 display(b'max', max_val)
575 display(b'max', max_val)
576 avg = tuple([sum(x) / count for x in zip(*timings)])
576 avg = tuple([sum(x) / count for x in zip(*timings)])
577 display(b'avg', avg)
577 display(b'avg', avg)
578 median = timings[len(timings) // 2]
578 median = timings[len(timings) // 2]
579 display(b'median', median)
579 display(b'median', median)
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
585 def getint(ui, section, name, default):
585 def getint(ui, section, name, default):
586 # for "historical portability":
586 # for "historical portability":
587 # ui.configint has been available since 1.9 (or fa2b596db182)
587 # ui.configint has been available since 1.9 (or fa2b596db182)
588 v = ui.config(section, name, None)
588 v = ui.config(section, name, None)
589 if v is None:
589 if v is None:
590 return default
590 return default
591 try:
591 try:
592 return int(v)
592 return int(v)
593 except ValueError:
593 except ValueError:
594 raise error.ConfigError(
594 raise error.ConfigError(
595 b"%s.%s is not an integer ('%s')" % (section, name, v)
595 b"%s.%s is not an integer ('%s')" % (section, name, v)
596 )
596 )
597
597
598
598
599 def safeattrsetter(obj, name, ignoremissing=False):
599 def safeattrsetter(obj, name, ignoremissing=False):
600 """Ensure that 'obj' has 'name' attribute before subsequent setattr
600 """Ensure that 'obj' has 'name' attribute before subsequent setattr
601
601
602 This function is aborted, if 'obj' doesn't have 'name' attribute
602 This function is aborted, if 'obj' doesn't have 'name' attribute
603 at runtime. This avoids overlooking removal of an attribute, which
603 at runtime. This avoids overlooking removal of an attribute, which
604 breaks assumption of performance measurement, in the future.
604 breaks assumption of performance measurement, in the future.
605
605
606 This function returns the object to (1) assign a new value, and
606 This function returns the object to (1) assign a new value, and
607 (2) restore an original value to the attribute.
607 (2) restore an original value to the attribute.
608
608
609 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
609 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
610 abortion, and this function returns None. This is useful to
610 abortion, and this function returns None. This is useful to
611 examine an attribute, which isn't ensured in all Mercurial
611 examine an attribute, which isn't ensured in all Mercurial
612 versions.
612 versions.
613 """
613 """
614 if not util.safehasattr(obj, name):
614 if not util.safehasattr(obj, name):
615 if ignoremissing:
615 if ignoremissing:
616 return None
616 return None
617 raise error.Abort(
617 raise error.Abort(
618 (
618 (
619 b"missing attribute %s of %s might break assumption"
619 b"missing attribute %s of %s might break assumption"
620 b" of performance measurement"
620 b" of performance measurement"
621 )
621 )
622 % (name, obj)
622 % (name, obj)
623 )
623 )
624
624
625 origvalue = getattr(obj, _sysstr(name))
625 origvalue = getattr(obj, _sysstr(name))
626
626
627 class attrutil(object):
627 class attrutil(object):
628 def set(self, newvalue):
628 def set(self, newvalue):
629 setattr(obj, _sysstr(name), newvalue)
629 setattr(obj, _sysstr(name), newvalue)
630
630
631 def restore(self):
631 def restore(self):
632 setattr(obj, _sysstr(name), origvalue)
632 setattr(obj, _sysstr(name), origvalue)
633
633
634 return attrutil()
634 return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
640 def getbranchmapsubsettable():
640 def getbranchmapsubsettable():
641 # for "historical portability":
641 # for "historical portability":
642 # subsettable is defined in:
642 # subsettable is defined in:
643 # - branchmap since 2.9 (or 175c6fd8cacc)
643 # - branchmap since 2.9 (or 175c6fd8cacc)
644 # - repoview since 2.5 (or 59a9f18d4587)
644 # - repoview since 2.5 (or 59a9f18d4587)
645 # - repoviewutil since 5.0
645 # - repoviewutil since 5.0
646 for mod in (branchmap, repoview, repoviewutil):
646 for mod in (branchmap, repoview, repoviewutil):
647 subsettable = getattr(mod, 'subsettable', None)
647 subsettable = getattr(mod, 'subsettable', None)
648 if subsettable:
648 if subsettable:
649 return subsettable
649 return subsettable
650
650
651 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
651 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
652 # branchmap and repoview modules exist, but subsettable attribute
652 # branchmap and repoview modules exist, but subsettable attribute
653 # doesn't)
653 # doesn't)
654 raise error.Abort(
654 raise error.Abort(
655 b"perfbranchmap not available with this Mercurial",
655 b"perfbranchmap not available with this Mercurial",
656 hint=b"use 2.5 or later",
656 hint=b"use 2.5 or later",
657 )
657 )
658
658
659
659
660 def getsvfs(repo):
660 def getsvfs(repo):
661 """Return appropriate object to access files under .hg/store
661 """Return appropriate object to access files under .hg/store
662 """
662 """
663 # for "historical portability":
663 # for "historical portability":
664 # repo.svfs has been available since 2.3 (or 7034365089bf)
664 # repo.svfs has been available since 2.3 (or 7034365089bf)
665 svfs = getattr(repo, 'svfs', None)
665 svfs = getattr(repo, 'svfs', None)
666 if svfs:
666 if svfs:
667 return svfs
667 return svfs
668 else:
668 else:
669 return getattr(repo, 'sopener')
669 return getattr(repo, 'sopener')
670
670
671
671
672 def getvfs(repo):
672 def getvfs(repo):
673 """Return appropriate object to access files under .hg
673 """Return appropriate object to access files under .hg
674 """
674 """
675 # for "historical portability":
675 # for "historical portability":
676 # repo.vfs has been available since 2.3 (or 7034365089bf)
676 # repo.vfs has been available since 2.3 (or 7034365089bf)
677 vfs = getattr(repo, 'vfs', None)
677 vfs = getattr(repo, 'vfs', None)
678 if vfs:
678 if vfs:
679 return vfs
679 return vfs
680 else:
680 else:
681 return getattr(repo, 'opener')
681 return getattr(repo, 'opener')
682
682
683
683
684 def repocleartagscachefunc(repo):
684 def repocleartagscachefunc(repo):
685 """Return the function to clear tags cache according to repo internal API
685 """Return the function to clear tags cache according to repo internal API
686 """
686 """
687 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
687 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
688 # in this case, setattr(repo, '_tagscache', None) or so isn't
688 # in this case, setattr(repo, '_tagscache', None) or so isn't
689 # correct way to clear tags cache, because existing code paths
689 # correct way to clear tags cache, because existing code paths
690 # expect _tagscache to be a structured object.
690 # expect _tagscache to be a structured object.
691 def clearcache():
691 def clearcache():
692 # _tagscache has been filteredpropertycache since 2.5 (or
692 # _tagscache has been filteredpropertycache since 2.5 (or
693 # 98c867ac1330), and delattr() can't work in such case
693 # 98c867ac1330), and delattr() can't work in such case
694 if b'_tagscache' in vars(repo):
694 if b'_tagscache' in vars(repo):
695 del repo.__dict__[b'_tagscache']
695 del repo.__dict__[b'_tagscache']
696
696
697 return clearcache
697 return clearcache
698
698
699 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
699 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
700 if repotags: # since 1.4 (or 5614a628d173)
700 if repotags: # since 1.4 (or 5614a628d173)
701 return lambda: repotags.set(None)
701 return lambda: repotags.set(None)
702
702
703 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
703 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
704 if repotagscache: # since 0.6 (or d7df759d0e97)
704 if repotagscache: # since 0.6 (or d7df759d0e97)
705 return lambda: repotagscache.set(None)
705 return lambda: repotagscache.set(None)
706
706
707 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
707 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
708 # this point, but it isn't so problematic, because:
708 # this point, but it isn't so problematic, because:
709 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
709 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
710 # in perftags() causes failure soon
710 # in perftags() causes failure soon
711 # - perf.py itself has been available since 1.1 (or eb240755386d)
711 # - perf.py itself has been available since 1.1 (or eb240755386d)
712 raise error.Abort(b"tags API of this hg command is unknown")
712 raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
718 def clearfilecache(obj, attrname):
718 def clearfilecache(obj, attrname):
719 unfiltered = getattr(obj, 'unfiltered', None)
719 unfiltered = getattr(obj, 'unfiltered', None)
720 if unfiltered is not None:
720 if unfiltered is not None:
721 obj = obj.unfiltered()
721 obj = obj.unfiltered()
722 if attrname in vars(obj):
722 if attrname in vars(obj):
723 delattr(obj, attrname)
723 delattr(obj, attrname)
724 obj._filecache.pop(attrname, None)
724 obj._filecache.pop(attrname, None)
725
725
726
726
727 def clearchangelog(repo):
727 def clearchangelog(repo):
728 if repo is not repo.unfiltered():
728 if repo is not repo.unfiltered():
729 object.__setattr__(repo, r'_clcachekey', None)
729 object.__setattr__(repo, r'_clcachekey', None)
730 object.__setattr__(repo, r'_clcache', None)
730 object.__setattr__(repo, r'_clcache', None)
731 clearfilecache(repo.unfiltered(), 'changelog')
731 clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
737 @command(b'perfwalk', formatteropts)
737 @command(b'perfwalk', formatteropts)
738 def perfwalk(ui, repo, *pats, **opts):
738 def perfwalk(ui, repo, *pats, **opts):
739 opts = _byteskwargs(opts)
739 opts = _byteskwargs(opts)
740 timer, fm = gettimer(ui, opts)
740 timer, fm = gettimer(ui, opts)
741 m = scmutil.match(repo[None], pats, {})
741 m = scmutil.match(repo[None], pats, {})
742 timer(
742 timer(
743 lambda: len(
743 lambda: len(
744 list(
744 list(
745 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
745 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
746 )
746 )
747 )
747 )
748 )
748 )
749 fm.end()
749 fm.end()
750
750
751
751
752 @command(b'perfannotate', formatteropts)
752 @command(b'perfannotate', formatteropts)
753 def perfannotate(ui, repo, f, **opts):
753 def perfannotate(ui, repo, f, **opts):
754 opts = _byteskwargs(opts)
754 opts = _byteskwargs(opts)
755 timer, fm = gettimer(ui, opts)
755 timer, fm = gettimer(ui, opts)
756 fc = repo[b'.'][f]
756 fc = repo[b'.'][f]
757 timer(lambda: len(fc.annotate(True)))
757 timer(lambda: len(fc.annotate(True)))
758 fm.end()
758 fm.end()
759
759
760
760
761 @command(
761 @command(
762 b'perfstatus',
762 b'perfstatus',
763 [(b'u', b'unknown', False, b'ask status to look for unknown files')]
763 [(b'u', b'unknown', False, b'ask status to look for unknown files')]
764 + formatteropts,
764 + formatteropts,
765 )
765 )
766 def perfstatus(ui, repo, **opts):
766 def perfstatus(ui, repo, **opts):
767 """benchmark the performance of a single status call
767 """benchmark the performance of a single status call
768
768
769 The repository data are preserved between each call.
769 The repository data are preserved between each call.
770
770
771 By default, only the status of the tracked file are requested. If
771 By default, only the status of the tracked file are requested. If
772 `--unknown` is passed, the "unknown" files are also tracked.
772 `--unknown` is passed, the "unknown" files are also tracked.
773 """
773 """
774 opts = _byteskwargs(opts)
774 opts = _byteskwargs(opts)
775 # m = match.always(repo.root, repo.getcwd())
775 # m = match.always(repo.root, repo.getcwd())
776 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
776 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
777 # False))))
777 # False))))
778 timer, fm = gettimer(ui, opts)
778 timer, fm = gettimer(ui, opts)
779 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
779 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
780 fm.end()
780 fm.end()
781
781
782
782
783 @command(b'perfaddremove', formatteropts)
783 @command(b'perfaddremove', formatteropts)
784 def perfaddremove(ui, repo, **opts):
784 def perfaddremove(ui, repo, **opts):
785 opts = _byteskwargs(opts)
785 opts = _byteskwargs(opts)
786 timer, fm = gettimer(ui, opts)
786 timer, fm = gettimer(ui, opts)
787 try:
787 try:
788 oldquiet = repo.ui.quiet
788 oldquiet = repo.ui.quiet
789 repo.ui.quiet = True
789 repo.ui.quiet = True
790 matcher = scmutil.match(repo[None])
790 matcher = scmutil.match(repo[None])
791 opts[b'dry_run'] = True
791 opts[b'dry_run'] = True
792 if b'uipathfn' in getargspec(scmutil.addremove).args:
792 if b'uipathfn' in getargspec(scmutil.addremove).args:
793 uipathfn = scmutil.getuipathfn(repo)
793 uipathfn = scmutil.getuipathfn(repo)
794 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
794 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
795 else:
795 else:
796 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
796 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
797 finally:
797 finally:
798 repo.ui.quiet = oldquiet
798 repo.ui.quiet = oldquiet
799 fm.end()
799 fm.end()
800
800
801
801
802 def clearcaches(cl):
802 def clearcaches(cl):
803 # behave somewhat consistently across internal API changes
803 # behave somewhat consistently across internal API changes
804 if util.safehasattr(cl, b'clearcaches'):
804 if util.safehasattr(cl, b'clearcaches'):
805 cl.clearcaches()
805 cl.clearcaches()
806 elif util.safehasattr(cl, b'_nodecache'):
806 elif util.safehasattr(cl, b'_nodecache'):
807 from mercurial.node import nullid, nullrev
807 from mercurial.node import nullid, nullrev
808
808
809 cl._nodecache = {nullid: nullrev}
809 cl._nodecache = {nullid: nullrev}
810 cl._nodepos = None
810 cl._nodepos = None
811
811
812
812
813 @command(b'perfheads', formatteropts)
813 @command(b'perfheads', formatteropts)
814 def perfheads(ui, repo, **opts):
814 def perfheads(ui, repo, **opts):
815 """benchmark the computation of a changelog heads"""
815 """benchmark the computation of a changelog heads"""
816 opts = _byteskwargs(opts)
816 opts = _byteskwargs(opts)
817 timer, fm = gettimer(ui, opts)
817 timer, fm = gettimer(ui, opts)
818 cl = repo.changelog
818 cl = repo.changelog
819
819
820 def s():
820 def s():
821 clearcaches(cl)
821 clearcaches(cl)
822
822
823 def d():
823 def d():
824 len(cl.headrevs())
824 len(cl.headrevs())
825
825
826 timer(d, setup=s)
826 timer(d, setup=s)
827 fm.end()
827 fm.end()
828
828
829
829
830 @command(
830 @command(
831 b'perftags',
831 b'perftags',
832 formatteropts
832 formatteropts
833 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
833 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
834 )
834 )
835 def perftags(ui, repo, **opts):
835 def perftags(ui, repo, **opts):
836 opts = _byteskwargs(opts)
836 opts = _byteskwargs(opts)
837 timer, fm = gettimer(ui, opts)
837 timer, fm = gettimer(ui, opts)
838 repocleartagscache = repocleartagscachefunc(repo)
838 repocleartagscache = repocleartagscachefunc(repo)
839 clearrevlogs = opts[b'clear_revlogs']
839 clearrevlogs = opts[b'clear_revlogs']
840
840
841 def s():
841 def s():
842 if clearrevlogs:
842 if clearrevlogs:
843 clearchangelog(repo)
843 clearchangelog(repo)
844 clearfilecache(repo.unfiltered(), 'manifest')
844 clearfilecache(repo.unfiltered(), 'manifest')
845 repocleartagscache()
845 repocleartagscache()
846
846
847 def t():
847 def t():
848 return len(repo.tags())
848 return len(repo.tags())
849
849
850 timer(t, setup=s)
850 timer(t, setup=s)
851 fm.end()
851 fm.end()
852
852
853
853
854 @command(b'perfancestors', formatteropts)
854 @command(b'perfancestors', formatteropts)
855 def perfancestors(ui, repo, **opts):
855 def perfancestors(ui, repo, **opts):
856 opts = _byteskwargs(opts)
856 opts = _byteskwargs(opts)
857 timer, fm = gettimer(ui, opts)
857 timer, fm = gettimer(ui, opts)
858 heads = repo.changelog.headrevs()
858 heads = repo.changelog.headrevs()
859
859
860 def d():
860 def d():
861 for a in repo.changelog.ancestors(heads):
861 for a in repo.changelog.ancestors(heads):
862 pass
862 pass
863
863
864 timer(d)
864 timer(d)
865 fm.end()
865 fm.end()
866
866
867
867
868 @command(b'perfancestorset', formatteropts)
868 @command(b'perfancestorset', formatteropts)
869 def perfancestorset(ui, repo, revset, **opts):
869 def perfancestorset(ui, repo, revset, **opts):
870 opts = _byteskwargs(opts)
870 opts = _byteskwargs(opts)
871 timer, fm = gettimer(ui, opts)
871 timer, fm = gettimer(ui, opts)
872 revs = repo.revs(revset)
872 revs = repo.revs(revset)
873 heads = repo.changelog.headrevs()
873 heads = repo.changelog.headrevs()
874
874
875 def d():
875 def d():
876 s = repo.changelog.ancestors(heads)
876 s = repo.changelog.ancestors(heads)
877 for rev in revs:
877 for rev in revs:
878 rev in s
878 rev in s
879
879
880 timer(d)
880 timer(d)
881 fm.end()
881 fm.end()
882
882
883
883
884 @command(b'perfdiscovery', formatteropts, b'PATH')
884 @command(b'perfdiscovery', formatteropts, b'PATH')
885 def perfdiscovery(ui, repo, path, **opts):
885 def perfdiscovery(ui, repo, path, **opts):
886 """benchmark discovery between local repo and the peer at given path
886 """benchmark discovery between local repo and the peer at given path
887 """
887 """
888 repos = [repo, None]
888 repos = [repo, None]
889 timer, fm = gettimer(ui, opts)
889 timer, fm = gettimer(ui, opts)
890 path = ui.expandpath(path)
890 path = ui.expandpath(path)
891
891
892 def s():
892 def s():
893 repos[1] = hg.peer(ui, opts, path)
893 repos[1] = hg.peer(ui, opts, path)
894
894
895 def d():
895 def d():
896 setdiscovery.findcommonheads(ui, *repos)
896 setdiscovery.findcommonheads(ui, *repos)
897
897
898 timer(d, setup=s)
898 timer(d, setup=s)
899 fm.end()
899 fm.end()
900
900
901
901
902 @command(
902 @command(
903 b'perfbookmarks',
903 b'perfbookmarks',
904 formatteropts
904 formatteropts
905 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
905 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
906 )
906 )
907 def perfbookmarks(ui, repo, **opts):
907 def perfbookmarks(ui, repo, **opts):
908 """benchmark parsing bookmarks from disk to memory"""
908 """benchmark parsing bookmarks from disk to memory"""
909 opts = _byteskwargs(opts)
909 opts = _byteskwargs(opts)
910 timer, fm = gettimer(ui, opts)
910 timer, fm = gettimer(ui, opts)
911
911
912 clearrevlogs = opts[b'clear_revlogs']
912 clearrevlogs = opts[b'clear_revlogs']
913
913
914 def s():
914 def s():
915 if clearrevlogs:
915 if clearrevlogs:
916 clearchangelog(repo)
916 clearchangelog(repo)
917 clearfilecache(repo, b'_bookmarks')
917 clearfilecache(repo, b'_bookmarks')
918
918
919 def d():
919 def d():
920 repo._bookmarks
920 repo._bookmarks
921
921
922 timer(d, setup=s)
922 timer(d, setup=s)
923 fm.end()
923 fm.end()
924
924
925
925
926 @command(b'perfbundleread', formatteropts, b'BUNDLE')
926 @command(b'perfbundleread', formatteropts, b'BUNDLE')
927 def perfbundleread(ui, repo, bundlepath, **opts):
927 def perfbundleread(ui, repo, bundlepath, **opts):
928 """Benchmark reading of bundle files.
928 """Benchmark reading of bundle files.
929
929
930 This command is meant to isolate the I/O part of bundle reading as
930 This command is meant to isolate the I/O part of bundle reading as
931 much as possible.
931 much as possible.
932 """
932 """
933 from mercurial import (
933 from mercurial import (
934 bundle2,
934 bundle2,
935 exchange,
935 exchange,
936 streamclone,
936 streamclone,
937 )
937 )
938
938
939 opts = _byteskwargs(opts)
939 opts = _byteskwargs(opts)
940
940
941 def makebench(fn):
941 def makebench(fn):
942 def run():
942 def run():
943 with open(bundlepath, b'rb') as fh:
943 with open(bundlepath, b'rb') as fh:
944 bundle = exchange.readbundle(ui, fh, bundlepath)
944 bundle = exchange.readbundle(ui, fh, bundlepath)
945 fn(bundle)
945 fn(bundle)
946
946
947 return run
947 return run
948
948
949 def makereadnbytes(size):
949 def makereadnbytes(size):
950 def run():
950 def run():
951 with open(bundlepath, b'rb') as fh:
951 with open(bundlepath, b'rb') as fh:
952 bundle = exchange.readbundle(ui, fh, bundlepath)
952 bundle = exchange.readbundle(ui, fh, bundlepath)
953 while bundle.read(size):
953 while bundle.read(size):
954 pass
954 pass
955
955
956 return run
956 return run
957
957
958 def makestdioread(size):
958 def makestdioread(size):
959 def run():
959 def run():
960 with open(bundlepath, b'rb') as fh:
960 with open(bundlepath, b'rb') as fh:
961 while fh.read(size):
961 while fh.read(size):
962 pass
962 pass
963
963
964 return run
964 return run
965
965
966 # bundle1
966 # bundle1
967
967
968 def deltaiter(bundle):
968 def deltaiter(bundle):
969 for delta in bundle.deltaiter():
969 for delta in bundle.deltaiter():
970 pass
970 pass
971
971
972 def iterchunks(bundle):
972 def iterchunks(bundle):
973 for chunk in bundle.getchunks():
973 for chunk in bundle.getchunks():
974 pass
974 pass
975
975
976 # bundle2
976 # bundle2
977
977
978 def forwardchunks(bundle):
978 def forwardchunks(bundle):
979 for chunk in bundle._forwardchunks():
979 for chunk in bundle._forwardchunks():
980 pass
980 pass
981
981
982 def iterparts(bundle):
982 def iterparts(bundle):
983 for part in bundle.iterparts():
983 for part in bundle.iterparts():
984 pass
984 pass
985
985
986 def iterpartsseekable(bundle):
986 def iterpartsseekable(bundle):
987 for part in bundle.iterparts(seekable=True):
987 for part in bundle.iterparts(seekable=True):
988 pass
988 pass
989
989
990 def seek(bundle):
990 def seek(bundle):
991 for part in bundle.iterparts(seekable=True):
991 for part in bundle.iterparts(seekable=True):
992 part.seek(0, os.SEEK_END)
992 part.seek(0, os.SEEK_END)
993
993
994 def makepartreadnbytes(size):
994 def makepartreadnbytes(size):
995 def run():
995 def run():
996 with open(bundlepath, b'rb') as fh:
996 with open(bundlepath, b'rb') as fh:
997 bundle = exchange.readbundle(ui, fh, bundlepath)
997 bundle = exchange.readbundle(ui, fh, bundlepath)
998 for part in bundle.iterparts():
998 for part in bundle.iterparts():
999 while part.read(size):
999 while part.read(size):
1000 pass
1000 pass
1001
1001
1002 return run
1002 return run
1003
1003
1004 benches = [
1004 benches = [
1005 (makestdioread(8192), b'read(8k)'),
1005 (makestdioread(8192), b'read(8k)'),
1006 (makestdioread(16384), b'read(16k)'),
1006 (makestdioread(16384), b'read(16k)'),
1007 (makestdioread(32768), b'read(32k)'),
1007 (makestdioread(32768), b'read(32k)'),
1008 (makestdioread(131072), b'read(128k)'),
1008 (makestdioread(131072), b'read(128k)'),
1009 ]
1009 ]
1010
1010
1011 with open(bundlepath, b'rb') as fh:
1011 with open(bundlepath, b'rb') as fh:
1012 bundle = exchange.readbundle(ui, fh, bundlepath)
1012 bundle = exchange.readbundle(ui, fh, bundlepath)
1013
1013
1014 if isinstance(bundle, changegroup.cg1unpacker):
1014 if isinstance(bundle, changegroup.cg1unpacker):
1015 benches.extend(
1015 benches.extend(
1016 [
1016 [
1017 (makebench(deltaiter), b'cg1 deltaiter()'),
1017 (makebench(deltaiter), b'cg1 deltaiter()'),
1018 (makebench(iterchunks), b'cg1 getchunks()'),
1018 (makebench(iterchunks), b'cg1 getchunks()'),
1019 (makereadnbytes(8192), b'cg1 read(8k)'),
1019 (makereadnbytes(8192), b'cg1 read(8k)'),
1020 (makereadnbytes(16384), b'cg1 read(16k)'),
1020 (makereadnbytes(16384), b'cg1 read(16k)'),
1021 (makereadnbytes(32768), b'cg1 read(32k)'),
1021 (makereadnbytes(32768), b'cg1 read(32k)'),
1022 (makereadnbytes(131072), b'cg1 read(128k)'),
1022 (makereadnbytes(131072), b'cg1 read(128k)'),
1023 ]
1023 ]
1024 )
1024 )
1025 elif isinstance(bundle, bundle2.unbundle20):
1025 elif isinstance(bundle, bundle2.unbundle20):
1026 benches.extend(
1026 benches.extend(
1027 [
1027 [
1028 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1028 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1029 (makebench(iterparts), b'bundle2 iterparts()'),
1029 (makebench(iterparts), b'bundle2 iterparts()'),
1030 (
1030 (
1031 makebench(iterpartsseekable),
1031 makebench(iterpartsseekable),
1032 b'bundle2 iterparts() seekable',
1032 b'bundle2 iterparts() seekable',
1033 ),
1033 ),
1034 (makebench(seek), b'bundle2 part seek()'),
1034 (makebench(seek), b'bundle2 part seek()'),
1035 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1035 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1036 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1036 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1037 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1037 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1038 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1038 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1039 ]
1039 ]
1040 )
1040 )
1041 elif isinstance(bundle, streamclone.streamcloneapplier):
1041 elif isinstance(bundle, streamclone.streamcloneapplier):
1042 raise error.Abort(b'stream clone bundles not supported')
1042 raise error.Abort(b'stream clone bundles not supported')
1043 else:
1043 else:
1044 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1044 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1045
1045
1046 for fn, title in benches:
1046 for fn, title in benches:
1047 timer, fm = gettimer(ui, opts)
1047 timer, fm = gettimer(ui, opts)
1048 timer(fn, title=title)
1048 timer(fn, title=title)
1049 fm.end()
1049 fm.end()
1050
1050
1051
1051
1052 @command(
1052 @command(
1053 b'perfchangegroupchangelog',
1053 b'perfchangegroupchangelog',
1054 formatteropts
1054 formatteropts
1055 + [
1055 + [
1056 (b'', b'cgversion', b'02', b'changegroup version'),
1056 (b'', b'cgversion', b'02', b'changegroup version'),
1057 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1057 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1058 ],
1058 ],
1059 )
1059 )
1060 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1060 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1061 """Benchmark producing a changelog group for a changegroup.
1061 """Benchmark producing a changelog group for a changegroup.
1062
1062
1063 This measures the time spent processing the changelog during a
1063 This measures the time spent processing the changelog during a
1064 bundle operation. This occurs during `hg bundle` and on a server
1064 bundle operation. This occurs during `hg bundle` and on a server
1065 processing a `getbundle` wire protocol request (handles clones
1065 processing a `getbundle` wire protocol request (handles clones
1066 and pull requests).
1066 and pull requests).
1067
1067
1068 By default, all revisions are added to the changegroup.
1068 By default, all revisions are added to the changegroup.
1069 """
1069 """
1070 opts = _byteskwargs(opts)
1070 opts = _byteskwargs(opts)
1071 cl = repo.changelog
1071 cl = repo.changelog
1072 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1072 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1073 bundler = changegroup.getbundler(cgversion, repo)
1073 bundler = changegroup.getbundler(cgversion, repo)
1074
1074
1075 def d():
1075 def d():
1076 state, chunks = bundler._generatechangelog(cl, nodes)
1076 state, chunks = bundler._generatechangelog(cl, nodes)
1077 for chunk in chunks:
1077 for chunk in chunks:
1078 pass
1078 pass
1079
1079
1080 timer, fm = gettimer(ui, opts)
1080 timer, fm = gettimer(ui, opts)
1081
1081
1082 # Terminal printing can interfere with timing. So disable it.
1082 # Terminal printing can interfere with timing. So disable it.
1083 with ui.configoverride({(b'progress', b'disable'): True}):
1083 with ui.configoverride({(b'progress', b'disable'): True}):
1084 timer(d)
1084 timer(d)
1085
1085
1086 fm.end()
1086 fm.end()
1087
1087
1088
1088
1089 @command(b'perfdirs', formatteropts)
1089 @command(b'perfdirs', formatteropts)
1090 def perfdirs(ui, repo, **opts):
1090 def perfdirs(ui, repo, **opts):
1091 opts = _byteskwargs(opts)
1091 opts = _byteskwargs(opts)
1092 timer, fm = gettimer(ui, opts)
1092 timer, fm = gettimer(ui, opts)
1093 dirstate = repo.dirstate
1093 dirstate = repo.dirstate
1094 b'a' in dirstate
1094 b'a' in dirstate
1095
1095
1096 def d():
1096 def d():
1097 dirstate.hasdir(b'a')
1097 dirstate.hasdir(b'a')
1098 del dirstate._map._dirs
1098 del dirstate._map._dirs
1099
1099
1100 timer(d)
1100 timer(d)
1101 fm.end()
1101 fm.end()
1102
1102
1103
1103
1104 @command(b'perfdirstate', formatteropts)
1104 @command(b'perfdirstate', formatteropts)
1105 def perfdirstate(ui, repo, **opts):
1105 def perfdirstate(ui, repo, **opts):
1106 """benchmap the time necessary to load a dirstate from scratch
1106 """benchmap the time necessary to load a dirstate from scratch
1107
1107
1108 The dirstate is loaded to the point were a "contains" request can be
1108 The dirstate is loaded to the point were a "contains" request can be
1109 answered.
1109 answered.
1110 """
1110 """
1111 opts = _byteskwargs(opts)
1111 opts = _byteskwargs(opts)
1112 timer, fm = gettimer(ui, opts)
1112 timer, fm = gettimer(ui, opts)
1113 b"a" in repo.dirstate
1113 b"a" in repo.dirstate
1114
1114
1115 def setup():
1116 repo.dirstate.invalidate()
1117
1115 def d():
1118 def d():
1116 repo.dirstate.invalidate()
1117 b"a" in repo.dirstate
1119 b"a" in repo.dirstate
1118
1120
1119 timer(d)
1121 timer(d, setup=setup)
1120 fm.end()
1122 fm.end()
1121
1123
1122
1124
1123 @command(b'perfdirstatedirs', formatteropts)
1125 @command(b'perfdirstatedirs', formatteropts)
1124 def perfdirstatedirs(ui, repo, **opts):
1126 def perfdirstatedirs(ui, repo, **opts):
1125 opts = _byteskwargs(opts)
1127 opts = _byteskwargs(opts)
1126 timer, fm = gettimer(ui, opts)
1128 timer, fm = gettimer(ui, opts)
1127 b"a" in repo.dirstate
1129 b"a" in repo.dirstate
1128
1130
1129 def d():
1131 def d():
1130 repo.dirstate.hasdir(b"a")
1132 repo.dirstate.hasdir(b"a")
1131 del repo.dirstate._map._dirs
1133 del repo.dirstate._map._dirs
1132
1134
1133 timer(d)
1135 timer(d)
1134 fm.end()
1136 fm.end()
1135
1137
1136
1138
1137 @command(b'perfdirstatefoldmap', formatteropts)
1139 @command(b'perfdirstatefoldmap', formatteropts)
1138 def perfdirstatefoldmap(ui, repo, **opts):
1140 def perfdirstatefoldmap(ui, repo, **opts):
1139 opts = _byteskwargs(opts)
1141 opts = _byteskwargs(opts)
1140 timer, fm = gettimer(ui, opts)
1142 timer, fm = gettimer(ui, opts)
1141 dirstate = repo.dirstate
1143 dirstate = repo.dirstate
1142 b'a' in dirstate
1144 b'a' in dirstate
1143
1145
1144 def d():
1146 def d():
1145 dirstate._map.filefoldmap.get(b'a')
1147 dirstate._map.filefoldmap.get(b'a')
1146 del dirstate._map.filefoldmap
1148 del dirstate._map.filefoldmap
1147
1149
1148 timer(d)
1150 timer(d)
1149 fm.end()
1151 fm.end()
1150
1152
1151
1153
1152 @command(b'perfdirfoldmap', formatteropts)
1154 @command(b'perfdirfoldmap', formatteropts)
1153 def perfdirfoldmap(ui, repo, **opts):
1155 def perfdirfoldmap(ui, repo, **opts):
1154 opts = _byteskwargs(opts)
1156 opts = _byteskwargs(opts)
1155 timer, fm = gettimer(ui, opts)
1157 timer, fm = gettimer(ui, opts)
1156 dirstate = repo.dirstate
1158 dirstate = repo.dirstate
1157 b'a' in dirstate
1159 b'a' in dirstate
1158
1160
1159 def d():
1161 def d():
1160 dirstate._map.dirfoldmap.get(b'a')
1162 dirstate._map.dirfoldmap.get(b'a')
1161 del dirstate._map.dirfoldmap
1163 del dirstate._map.dirfoldmap
1162 del dirstate._map._dirs
1164 del dirstate._map._dirs
1163
1165
1164 timer(d)
1166 timer(d)
1165 fm.end()
1167 fm.end()
1166
1168
1167
1169
1168 @command(b'perfdirstatewrite', formatteropts)
1170 @command(b'perfdirstatewrite', formatteropts)
1169 def perfdirstatewrite(ui, repo, **opts):
1171 def perfdirstatewrite(ui, repo, **opts):
1170 opts = _byteskwargs(opts)
1172 opts = _byteskwargs(opts)
1171 timer, fm = gettimer(ui, opts)
1173 timer, fm = gettimer(ui, opts)
1172 ds = repo.dirstate
1174 ds = repo.dirstate
1173 b"a" in ds
1175 b"a" in ds
1174
1176
1175 def d():
1177 def d():
1176 ds._dirty = True
1178 ds._dirty = True
1177 ds.write(repo.currenttransaction())
1179 ds.write(repo.currenttransaction())
1178
1180
1179 timer(d)
1181 timer(d)
1180 fm.end()
1182 fm.end()
1181
1183
1182
1184
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    # local side: either an explicit --from revision or the working directory
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    # other side: the revision we merge against
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    # base: explicit --base revision, or the common ancestor of both sides
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1204
1206
1205
1207
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote=True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1237
1239
1238
1240
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1261
1263
1262
1264
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints before timing so lookup cost is excluded
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(ctx1, ctx2))
    fm.end()
1276
1278
1277
1279
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # with --full, also pay the cost of re-reading phase data
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1300
1302
1301
1303
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # imported locally: only this command needs them
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, outside the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # count remote phase roots that are known locally and non-public
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        # only the local phase-summary computation is timed
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1358
1360
1359
1361
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; resolve it to its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full hexadecimal node was given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # use getstorage() where available, otherwise fall back to
                # the older private _revlog attribute
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # clear in-memory (and with --clear-disk, persisted) caches so
        # every iteration reads the manifest cold
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1403
1405
1404
1406
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(n)

    timer(run)
    fm.end()
1417
1419
1418
1420
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupload():
        # drop the cached ignore matcher before each timed run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runload():
        # property access triggers (re)loading of the ignore rules
        dirstate._ignore

    timer(runload, setup=setupload, title=b"load")
    fm.end()
1435
1437
1436
1438
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        # resolve all requested revisions to nodes before timing starts
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # index (re)creation happens on this first changelog access
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1498
1500
1499
1501
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    # resolve all requested revisions to nodes before timing starts
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        # rebuild the nodemap before every timed run (cold lookups)

        def setup():
            setnodeget()

    else:
        # build the nodemap once and keep it warm across runs
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1566
1568
1567
1569
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a child `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != r'nt':
            # empty HGRCPATH: avoid reading the user's config during startup
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # Windows: no inline VAR=value syntax, set it via the environment
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1584
1586
1585
1587
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodes:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1611
1613
1612
1614
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files touched by a changeset"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[x].files()))
    fm.end()
1624
1626
1625
1627
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list of a changeset"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # changelog.read() returns a tuple; index 3 holds the files list
    timer(lambda: len(cl.read(x)[3]))
    fm.end()
1638
1640
1639
1641
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark a revision lookup through `repo.lookup`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo.lookup(rev))

    timer(run)
    fm.end()
1646
1648
1647
1649
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark replaying a long series of edits into a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every run replays the same pseudo-random edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    hunks = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        hunks.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in hunks:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
1685
1687
1686
1688
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the function once so attribute lookup is outside the timed call
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1694
1696
1695
1697
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node-to-rev lookup on a freshly opened changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def run():
        cl.rev(n)
        # drop the revlog caches so the next iteration starts cold
        clearcaches(cl)

    timer(run)
    fm.end()
1712
1714
1713
1715
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (output is buffered and discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # capture output so terminal printing does not dominate the timing
    ui.pushbuffer()

    def run():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run)
    ui.popbuffer()
    fm.end()
1731
1733
1732
1734
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1749
1751
1750
1752
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui whose output goes to /dev/null, so only the
    # templating work itself is measured
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1790
1792
1791
1793
1792 def _displaystats(ui, opts, entries, data):
1794 def _displaystats(ui, opts, entries, data):
1793 pass
1795 pass
1794 # use a second formatter because the data are quite different, not sure
1796 # use a second formatter because the data are quite different, not sure
1795 # how it flies with the templater.
1797 # how it flies with the templater.
1796 fm = ui.formatter(b'perf-stats', opts)
1798 fm = ui.formatter(b'perf-stats', opts)
1797 for key, title in entries:
1799 for key, title in entries:
1798 values = data[key]
1800 values = data[key]
1799 nbvalues = len(data)
1801 nbvalues = len(data)
1800 values.sort()
1802 values.sort()
1801 stats = {
1803 stats = {
1802 'key': key,
1804 'key': key,
1803 'title': title,
1805 'title': title,
1804 'nbitems': len(values),
1806 'nbitems': len(values),
1805 'min': values[0][0],
1807 'min': values[0][0],
1806 '10%': values[(nbvalues * 10) // 100][0],
1808 '10%': values[(nbvalues * 10) // 100][0],
1807 '25%': values[(nbvalues * 25) // 100][0],
1809 '25%': values[(nbvalues * 25) // 100][0],
1808 '50%': values[(nbvalues * 50) // 100][0],
1810 '50%': values[(nbvalues * 50) // 100][0],
1809 '75%': values[(nbvalues * 75) // 100][0],
1811 '75%': values[(nbvalues * 75) // 100][0],
1810 '80%': values[(nbvalues * 80) // 100][0],
1812 '80%': values[(nbvalues * 80) // 100][0],
1811 '85%': values[(nbvalues * 85) // 100][0],
1813 '85%': values[(nbvalues * 85) // 100][0],
1812 '90%': values[(nbvalues * 90) // 100][0],
1814 '90%': values[(nbvalues * 90) // 100][0],
1813 '95%': values[(nbvalues * 95) // 100][0],
1815 '95%': values[(nbvalues * 95) // 100][0],
1814 '99%': values[(nbvalues * 99) // 100][0],
1816 '99%': values[(nbvalues * 99) // 100][0],
1815 'max': values[-1][0],
1817 'max': values[-1][0],
1816 }
1818 }
1817 fm.startitem()
1819 fm.startitem()
1818 fm.data(**stats)
1820 fm.data(**stats)
1819 # make node pretty for the human output
1821 # make node pretty for the human output
1820 fm.plain('### %s (%d items)\n' % (title, len(values)))
1822 fm.plain('### %s (%d items)\n' % (title, len(values)))
1821 lines = [
1823 lines = [
1822 'min',
1824 'min',
1823 '10%',
1825 '10%',
1824 '25%',
1826 '25%',
1825 '50%',
1827 '50%',
1826 '75%',
1828 '75%',
1827 '80%',
1829 '80%',
1828 '85%',
1830 '85%',
1829 '90%',
1831 '90%',
1830 '95%',
1832 '95%',
1831 '99%',
1833 '99%',
1832 'max',
1834 'max',
1833 ]
1835 ]
1834 for l in lines:
1836 for l in lines:
1835 fm.plain('%s: %s\n' % (l, stats[l]))
1837 fm.plain('%s: %s\n' % (l, stats[l]))
1836 fm.end()
1838 fm.end()
1837
1839
1838
1840
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, %-template for the per-row dict) pairs; timing and
    # rename columns are stripped below when --timing is not requested
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant for merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # fix: sample the clock *before* computing the duration; the
                # previous code computed p2.time from the stale `end` left
                # over from the p1 measurement, yielding a bogus (often
                # negative) value
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2020
2022
2021
2023
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # note: the mutable default `revs=[]` is safe here because the name is
    # only ever rebound, never mutated in place
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # build the table header / row templates; the timing variant carries two
    # extra columns (rename count and elapsed time)
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # restrict to merge commits: those are the revisions where copy tracing
    # from a common ancestor to each parent is actually exercised
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace for this (base, parent) pair
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2147
2149
2148
2150
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def buildauditor():
        # rebuild the auditor from scratch on every run (abort=False)
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(buildauditor)
    fm.end()
2155
2157
2156
2158
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the repository store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # each timed run re-reads the fncache via its private loader
    timer(lambda: store.fncache._load())
    fm.end()
2168
2170
2169
2171
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # Benchmark writing the fncache back to disk.  The write happens inside
    # a real repository lock and transaction so the measurement matches what
    # an actual commit-path write would do.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # load once up-front so only the write is measured
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    # back up the on-disk fncache so the benchmark leaves the repo unchanged
    tr.addbackup(b'fncache')

    def d():
        # force the dirty flag each run, otherwise write() would be a no-op
        # after the first iteration
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()
2188
2190
2189
2191
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark path-encoding every entry currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once before timing so only the encoding itself is measured
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2203
2205
2204
2206
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker thread body for the threaded variant of `perfbdiff`.
    #
    # Protocol: the producer feeds text pairs on `q`, terminated by one
    # ``None`` sentinel per worker.  After draining up to its sentinel, the
    # worker parks on the `ready` condition until the next timed run (or
    # shutdown) is signalled via notify_all; the `done` event ends the loop.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # run the same diff variant the single-threaded path would use
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2220
2222
2221
2223
def _manifestrevision(repo, mnode):
    """Return the raw revision text of manifest node ``mnode``.

    Newer Mercurial exposes manifest storage via ``getstorage``; fall back
    to the private ``_revlog`` attribute on older versions.
    """
    manifestlog = repo.manifestlog
    storage = (
        manifestlog.getstorage(b'')
        if util.safehasattr(manifestlog, b'getstorage')
        else manifestlog._revlog
    )
    return storage.revision(mnode)
2231
2233
2232
2234
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m (or --alldata), the positional argument is the revision,
    # not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): the loop variable shadows the `rev` parameter (harmless,
    # it is not used afterwards).  The upper bound `len(r) - 1` excludes the
    # last revision of the revlog -- presumably deliberate, TODO confirm.
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default mode: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every collected pair in this thread
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded mode: prime the queue with one None sentinel per worker
        # so every started worker immediately drains its sentinel and parks
        # on `ready`; q.join() waits for that priming to complete before the
        # timed runs begin
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # one timed run: feed all pairs, terminate each worker's batch
            # with a None sentinel, wake the parked workers, then wait for
            # everything to be consumed
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set the done flag, feed sentinels so each
        # worker leaves its inner loop, and wake them one last time
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2347
2349
2348
2350
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies walking the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Gather every (old text, new text) pair up front so that only the
    # diffing itself ends up inside the timed function.
    pairs = []
    startrev = r.rev(r.lookup(rev))
    stop = min(startrev + count, len(r) - 1)
    for rev in range(startrev, stop):
        if not opts[b'alldata']:
            base = r.deltaparent(rev)
            pairs.append((r.revision(base), r.revision(rev)))
            continue
        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pman = _manifestrevision(repo, pctx.manifestnode())
            pairs.append((pman, mtext))

        # Load filelog revisions by iterating manifest delta.
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for filename, change in pman.diff(man).items():
            fctx = repo.file(filename)
            old = fctx.revision(change[0][0] or -1)
            new = fctx.revision(change[1][0] or -1)
            pairs.append((old, new))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2427
2429
2428
2430
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map of single-letter diff flag -> commands.diff() keyword argument
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for flags in ('', 'w', 'b', 'B', 'wB'):
        # keyword arguments for this flag combination; a dedicated local
        # avoids shadowing the formatter options captured above
        diffargs = {flagnames[c]: b'1' for c in flags}

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        label = flags.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(d, title=title)
    fm.end()
2452
2454
2453
2455
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    # raw bytes of the index file; parsed repeatedly by the closures below
    data = opener.read(indexfile)

    # first 4 bytes: flags in the high 16 bits, format version in the low 16
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for the lookup benches
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time instantiating a revlog from the on-disk file
        revlog.revlog(opener, indexfile)

    def read():
        # time a raw read of the index file, with no parsing
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # time parsing the raw index data into the index structure
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        # parse, then fetch a single index entry
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        # parse once, then fetch every entry ``count`` times
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            # unknown nodes are expected (see the "missing node" bench)
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, display title) pairs, each timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2574
2576
2575
2577
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # negative --startrev counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last = rllen - 1, startrev - 1
            step = -1 * step
        else:
            first, last = startrev, rllen

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2624
2626
2625
2627
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed message typo ("invalide" -> "invalid")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] = (rev, [timing from each pass]); passes must agree on revs
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: previously computed as ``resultcount * 70 // 100``, which
        # displayed the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2767
2769
2768
2770
2769 class _faketr(object):
2771 class _faketr(object):
2770 def add(s, x, y, z=None):
2772 def add(s, x, y, z=None):
2771 return None
2773 return None
2772
2774
2773
2775
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions ``startrev``..``stoprev`` of ``orig`` to a temporary
    copy of the revlog, timing each ``addrawrevision`` call.

    Returns a list of ``(rev, r[0])`` pairs where ``r`` comes from
    ``timeone()``.  ``runidx``, when given, only labels the progress bar.
    ``source`` selects how the revision data is fed in (see
    ``_getrevisionseed``).
    """
    timings = []
    # no-op transaction stub: addrawrevision requires a transaction object
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # start each write from a cold cache so runs are comparable
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is inside the timed region
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2823
2825
2824
2826
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair for ``addrawrevision`` recreating ``rev``.

    ``source`` chooses how the revision content is supplied: ``full`` passes
    the full text, the ``parent-*`` variants pass a cached delta against a
    parent, and ``storage`` reuses the delta base already chosen by ``orig``.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # keep whichever parent delta is shorter
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2865
2867
2866
2868
2867 @contextlib.contextmanager
2869 @contextlib.contextmanager
2868 def _temprevlog(ui, orig, truncaterev):
2870 def _temprevlog(ui, orig, truncaterev):
2869 from mercurial import vfs as vfsmod
2871 from mercurial import vfs as vfsmod
2870
2872
2871 if orig._inline:
2873 if orig._inline:
2872 raise error.Abort('not supporting inline revlog (yet)')
2874 raise error.Abort('not supporting inline revlog (yet)')
2873 revlogkwargs = {}
2875 revlogkwargs = {}
2874 k = 'upperboundcomp'
2876 k = 'upperboundcomp'
2875 if util.safehasattr(orig, k):
2877 if util.safehasattr(orig, k):
2876 revlogkwargs[k] = getattr(orig, k)
2878 revlogkwargs[k] = getattr(orig, k)
2877
2879
2878 origindexpath = orig.opener.join(orig.indexfile)
2880 origindexpath = orig.opener.join(orig.indexfile)
2879 origdatapath = orig.opener.join(orig.datafile)
2881 origdatapath = orig.opener.join(orig.datafile)
2880 indexname = 'revlog.i'
2882 indexname = 'revlog.i'
2881 dataname = 'revlog.d'
2883 dataname = 'revlog.d'
2882
2884
2883 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2885 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2884 try:
2886 try:
2885 # copy the data file in a temporary directory
2887 # copy the data file in a temporary directory
2886 ui.debug('copying data in %s\n' % tmpdir)
2888 ui.debug('copying data in %s\n' % tmpdir)
2887 destindexpath = os.path.join(tmpdir, 'revlog.i')
2889 destindexpath = os.path.join(tmpdir, 'revlog.i')
2888 destdatapath = os.path.join(tmpdir, 'revlog.d')
2890 destdatapath = os.path.join(tmpdir, 'revlog.d')
2889 shutil.copyfile(origindexpath, destindexpath)
2891 shutil.copyfile(origindexpath, destindexpath)
2890 shutil.copyfile(origdatapath, destdatapath)
2892 shutil.copyfile(origdatapath, destdatapath)
2891
2893
2892 # remove the data we want to add again
2894 # remove the data we want to add again
2893 ui.debug('truncating data to be rewritten\n')
2895 ui.debug('truncating data to be rewritten\n')
2894 with open(destindexpath, 'ab') as index:
2896 with open(destindexpath, 'ab') as index:
2895 index.seek(0)
2897 index.seek(0)
2896 index.truncate(truncaterev * orig._io.size)
2898 index.truncate(truncaterev * orig._io.size)
2897 with open(destdatapath, 'ab') as data:
2899 with open(destdatapath, 'ab') as data:
2898 data.seek(0)
2900 data.seek(0)
2899 data.truncate(orig.start(truncaterev))
2901 data.truncate(orig.start(truncaterev))
2900
2902
2901 # instantiate a new revlog from the temporary copy
2903 # instantiate a new revlog from the temporary copy
2902 ui.debug('truncating adding to be rewritten\n')
2904 ui.debug('truncating adding to be rewritten\n')
2903 vfs = vfsmod.vfs(tmpdir)
2905 vfs = vfsmod.vfs(tmpdir)
2904 vfs.options = getattr(orig.opener, 'options', None)
2906 vfs.options = getattr(orig.opener, 'options', None)
2905
2907
2906 dest = revlog.revlog(
2908 dest = revlog.revlog(
2907 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
2909 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
2908 )
2910 )
2909 if dest._inline:
2911 if dest._inline:
2910 raise error.Abort('not supporting inline revlog (yet)')
2912 raise error.Abort('not supporting inline revlog (yet)')
2911 # make sure internals are initialized
2913 # make sure internals are initialized
2912 dest.revision(len(dest) - 1)
2914 dest.revision(len(dest) - 1)
2913 yield dest
2915 yield dest
2914 del dest, vfs
2916 del dest, vfs
2915 finally:
2917 finally:
2916 shutil.rmtree(tmpdir, True)
2918 shutil.rmtree(tmpdir, True)
2917
2919
2918
2920
2919 @command(
2921 @command(
2920 b'perfrevlogchunks',
2922 b'perfrevlogchunks',
2921 revlogopts
2923 revlogopts
2922 + formatteropts
2924 + formatteropts
2923 + [
2925 + [
2924 (b'e', b'engines', b'', b'compression engines to use'),
2926 (b'e', b'engines', b'', b'compression engines to use'),
2925 (b's', b'startrev', 0, b'revision to start at'),
2927 (b's', b'startrev', 0, b'revision to start at'),
2926 ],
2928 ],
2927 b'-c|-m|FILE',
2929 b'-c|-m|FILE',
2928 )
2930 )
2929 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2931 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2930 """Benchmark operations on revlog chunks.
2932 """Benchmark operations on revlog chunks.
2931
2933
2932 Logically, each revlog is a collection of fulltext revisions. However,
2934 Logically, each revlog is a collection of fulltext revisions. However,
2933 stored within each revlog are "chunks" of possibly compressed data. This
2935 stored within each revlog are "chunks" of possibly compressed data. This
2934 data needs to be read and decompressed or compressed and written.
2936 data needs to be read and decompressed or compressed and written.
2935
2937
2936 This command measures the time it takes to read+decompress and recompress
2938 This command measures the time it takes to read+decompress and recompress
2937 chunks in a revlog. It effectively isolates I/O and compression performance.
2939 chunks in a revlog. It effectively isolates I/O and compression performance.
2938 For measurements of higher-level operations like resolving revisions,
2940 For measurements of higher-level operations like resolving revisions,
2939 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2941 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2940 """
2942 """
2941 opts = _byteskwargs(opts)
2943 opts = _byteskwargs(opts)
2942
2944
2943 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2945 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2944
2946
2945 # _chunkraw was renamed to _getsegmentforrevs.
2947 # _chunkraw was renamed to _getsegmentforrevs.
2946 try:
2948 try:
2947 segmentforrevs = rl._getsegmentforrevs
2949 segmentforrevs = rl._getsegmentforrevs
2948 except AttributeError:
2950 except AttributeError:
2949 segmentforrevs = rl._chunkraw
2951 segmentforrevs = rl._chunkraw
2950
2952
2951 # Verify engines argument.
2953 # Verify engines argument.
2952 if engines:
2954 if engines:
2953 engines = set(e.strip() for e in engines.split(b','))
2955 engines = set(e.strip() for e in engines.split(b','))
2954 for engine in engines:
2956 for engine in engines:
2955 try:
2957 try:
2956 util.compressionengines[engine]
2958 util.compressionengines[engine]
2957 except KeyError:
2959 except KeyError:
2958 raise error.Abort(b'unknown compression engine: %s' % engine)
2960 raise error.Abort(b'unknown compression engine: %s' % engine)
2959 else:
2961 else:
2960 engines = []
2962 engines = []
2961 for e in util.compengines:
2963 for e in util.compengines:
2962 engine = util.compengines[e]
2964 engine = util.compengines[e]
2963 try:
2965 try:
2964 if engine.available():
2966 if engine.available():
2965 engine.revlogcompressor().compress(b'dummy')
2967 engine.revlogcompressor().compress(b'dummy')
2966 engines.append(e)
2968 engines.append(e)
2967 except NotImplementedError:
2969 except NotImplementedError:
2968 pass
2970 pass
2969
2971
2970 revs = list(rl.revs(startrev, len(rl) - 1))
2972 revs = list(rl.revs(startrev, len(rl) - 1))
2971
2973
2972 def rlfh(rl):
2974 def rlfh(rl):
2973 if rl._inline:
2975 if rl._inline:
2974 return getsvfs(repo)(rl.indexfile)
2976 return getsvfs(repo)(rl.indexfile)
2975 else:
2977 else:
2976 return getsvfs(repo)(rl.datafile)
2978 return getsvfs(repo)(rl.datafile)
2977
2979
2978 def doread():
2980 def doread():
2979 rl.clearcaches()
2981 rl.clearcaches()
2980 for rev in revs:
2982 for rev in revs:
2981 segmentforrevs(rev, rev)
2983 segmentforrevs(rev, rev)
2982
2984
2983 def doreadcachedfh():
2985 def doreadcachedfh():
2984 rl.clearcaches()
2986 rl.clearcaches()
2985 fh = rlfh(rl)
2987 fh = rlfh(rl)
2986 for rev in revs:
2988 for rev in revs:
2987 segmentforrevs(rev, rev, df=fh)
2989 segmentforrevs(rev, rev, df=fh)
2988
2990
2989 def doreadbatch():
2991 def doreadbatch():
2990 rl.clearcaches()
2992 rl.clearcaches()
2991 segmentforrevs(revs[0], revs[-1])
2993 segmentforrevs(revs[0], revs[-1])
2992
2994
2993 def doreadbatchcachedfh():
2995 def doreadbatchcachedfh():
2994 rl.clearcaches()
2996 rl.clearcaches()
2995 fh = rlfh(rl)
2997 fh = rlfh(rl)
2996 segmentforrevs(revs[0], revs[-1], df=fh)
2998 segmentforrevs(revs[0], revs[-1], df=fh)
2997
2999
2998 def dochunk():
3000 def dochunk():
2999 rl.clearcaches()
3001 rl.clearcaches()
3000 fh = rlfh(rl)
3002 fh = rlfh(rl)
3001 for rev in revs:
3003 for rev in revs:
3002 rl._chunk(rev, df=fh)
3004 rl._chunk(rev, df=fh)
3003
3005
3004 chunks = [None]
3006 chunks = [None]
3005
3007
3006 def dochunkbatch():
3008 def dochunkbatch():
3007 rl.clearcaches()
3009 rl.clearcaches()
3008 fh = rlfh(rl)
3010 fh = rlfh(rl)
3009 # Save chunks as a side-effect.
3011 # Save chunks as a side-effect.
3010 chunks[0] = rl._chunks(revs, df=fh)
3012 chunks[0] = rl._chunks(revs, df=fh)
3011
3013
3012 def docompress(compressor):
3014 def docompress(compressor):
3013 rl.clearcaches()
3015 rl.clearcaches()
3014
3016
3015 try:
3017 try:
3016 # Swap in the requested compression engine.
3018 # Swap in the requested compression engine.
3017 oldcompressor = rl._compressor
3019 oldcompressor = rl._compressor
3018 rl._compressor = compressor
3020 rl._compressor = compressor
3019 for chunk in chunks[0]:
3021 for chunk in chunks[0]:
3020 rl.compress(chunk)
3022 rl.compress(chunk)
3021 finally:
3023 finally:
3022 rl._compressor = oldcompressor
3024 rl._compressor = oldcompressor
3023
3025
3024 benches = [
3026 benches = [
3025 (lambda: doread(), b'read'),
3027 (lambda: doread(), b'read'),
3026 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3028 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3027 (lambda: doreadbatch(), b'read batch'),
3029 (lambda: doreadbatch(), b'read batch'),
3028 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3030 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3029 (lambda: dochunk(), b'chunk'),
3031 (lambda: dochunk(), b'chunk'),
3030 (lambda: dochunkbatch(), b'chunk batch'),
3032 (lambda: dochunkbatch(), b'chunk batch'),
3031 ]
3033 ]
3032
3034
3033 for engine in sorted(engines):
3035 for engine in sorted(engines):
3034 compressor = util.compengines[engine].revlogcompressor()
3036 compressor = util.compengines[engine].revlogcompressor()
3035 benches.append(
3037 benches.append(
3036 (
3038 (
3037 functools.partial(docompress, compressor),
3039 functools.partial(docompress, compressor),
3038 b'compress w/ %s' % engine,
3040 b'compress w/ %s' % engine,
3039 )
3041 )
3040 )
3042 )
3041
3043
3042 for fn, title in benches:
3044 for fn, title in benches:
3043 timer, fm = gettimer(ui, opts)
3045 timer, fm = gettimer(ui, opts)
3044 timer(fn, title=title)
3046 timer(fn, title=title)
3045 fm.end()
3047 fm.end()
3046
3048
3047
3049
3048 @command(
3050 @command(
3049 b'perfrevlogrevision',
3051 b'perfrevlogrevision',
3050 revlogopts
3052 revlogopts
3051 + formatteropts
3053 + formatteropts
3052 + [(b'', b'cache', False, b'use caches instead of clearing')],
3054 + [(b'', b'cache', False, b'use caches instead of clearing')],
3053 b'-c|-m|FILE REV',
3055 b'-c|-m|FILE REV',
3054 )
3056 )
3055 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3057 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3056 """Benchmark obtaining a revlog revision.
3058 """Benchmark obtaining a revlog revision.
3057
3059
3058 Obtaining a revlog revision consists of roughly the following steps:
3060 Obtaining a revlog revision consists of roughly the following steps:
3059
3061
3060 1. Compute the delta chain
3062 1. Compute the delta chain
3061 2. Slice the delta chain if applicable
3063 2. Slice the delta chain if applicable
3062 3. Obtain the raw chunks for that delta chain
3064 3. Obtain the raw chunks for that delta chain
3063 4. Decompress each raw chunk
3065 4. Decompress each raw chunk
3064 5. Apply binary patches to obtain fulltext
3066 5. Apply binary patches to obtain fulltext
3065 6. Verify hash of fulltext
3067 6. Verify hash of fulltext
3066
3068
3067 This command measures the time spent in each of these phases.
3069 This command measures the time spent in each of these phases.
3068 """
3070 """
3069 opts = _byteskwargs(opts)
3071 opts = _byteskwargs(opts)
3070
3072
3071 if opts.get(b'changelog') or opts.get(b'manifest'):
3073 if opts.get(b'changelog') or opts.get(b'manifest'):
3072 file_, rev = None, file_
3074 file_, rev = None, file_
3073 elif rev is None:
3075 elif rev is None:
3074 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3076 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3075
3077
3076 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3078 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3077
3079
3078 # _chunkraw was renamed to _getsegmentforrevs.
3080 # _chunkraw was renamed to _getsegmentforrevs.
3079 try:
3081 try:
3080 segmentforrevs = r._getsegmentforrevs
3082 segmentforrevs = r._getsegmentforrevs
3081 except AttributeError:
3083 except AttributeError:
3082 segmentforrevs = r._chunkraw
3084 segmentforrevs = r._chunkraw
3083
3085
3084 node = r.lookup(rev)
3086 node = r.lookup(rev)
3085 rev = r.rev(node)
3087 rev = r.rev(node)
3086
3088
3087 def getrawchunks(data, chain):
3089 def getrawchunks(data, chain):
3088 start = r.start
3090 start = r.start
3089 length = r.length
3091 length = r.length
3090 inline = r._inline
3092 inline = r._inline
3091 iosize = r._io.size
3093 iosize = r._io.size
3092 buffer = util.buffer
3094 buffer = util.buffer
3093
3095
3094 chunks = []
3096 chunks = []
3095 ladd = chunks.append
3097 ladd = chunks.append
3096 for idx, item in enumerate(chain):
3098 for idx, item in enumerate(chain):
3097 offset = start(item[0])
3099 offset = start(item[0])
3098 bits = data[idx]
3100 bits = data[idx]
3099 for rev in item:
3101 for rev in item:
3100 chunkstart = start(rev)
3102 chunkstart = start(rev)
3101 if inline:
3103 if inline:
3102 chunkstart += (rev + 1) * iosize
3104 chunkstart += (rev + 1) * iosize
3103 chunklength = length(rev)
3105 chunklength = length(rev)
3104 ladd(buffer(bits, chunkstart - offset, chunklength))
3106 ladd(buffer(bits, chunkstart - offset, chunklength))
3105
3107
3106 return chunks
3108 return chunks
3107
3109
3108 def dodeltachain(rev):
3110 def dodeltachain(rev):
3109 if not cache:
3111 if not cache:
3110 r.clearcaches()
3112 r.clearcaches()
3111 r._deltachain(rev)
3113 r._deltachain(rev)
3112
3114
3113 def doread(chain):
3115 def doread(chain):
3114 if not cache:
3116 if not cache:
3115 r.clearcaches()
3117 r.clearcaches()
3116 for item in slicedchain:
3118 for item in slicedchain:
3117 segmentforrevs(item[0], item[-1])
3119 segmentforrevs(item[0], item[-1])
3118
3120
3119 def doslice(r, chain, size):
3121 def doslice(r, chain, size):
3120 for s in slicechunk(r, chain, targetsize=size):
3122 for s in slicechunk(r, chain, targetsize=size):
3121 pass
3123 pass
3122
3124
3123 def dorawchunks(data, chain):
3125 def dorawchunks(data, chain):
3124 if not cache:
3126 if not cache:
3125 r.clearcaches()
3127 r.clearcaches()
3126 getrawchunks(data, chain)
3128 getrawchunks(data, chain)
3127
3129
3128 def dodecompress(chunks):
3130 def dodecompress(chunks):
3129 decomp = r.decompress
3131 decomp = r.decompress
3130 for chunk in chunks:
3132 for chunk in chunks:
3131 decomp(chunk)
3133 decomp(chunk)
3132
3134
3133 def dopatch(text, bins):
3135 def dopatch(text, bins):
3134 if not cache:
3136 if not cache:
3135 r.clearcaches()
3137 r.clearcaches()
3136 mdiff.patches(text, bins)
3138 mdiff.patches(text, bins)
3137
3139
3138 def dohash(text):
3140 def dohash(text):
3139 if not cache:
3141 if not cache:
3140 r.clearcaches()
3142 r.clearcaches()
3141 r.checkhash(text, node, rev=rev)
3143 r.checkhash(text, node, rev=rev)
3142
3144
3143 def dorevision():
3145 def dorevision():
3144 if not cache:
3146 if not cache:
3145 r.clearcaches()
3147 r.clearcaches()
3146 r.revision(node)
3148 r.revision(node)
3147
3149
3148 try:
3150 try:
3149 from mercurial.revlogutils.deltas import slicechunk
3151 from mercurial.revlogutils.deltas import slicechunk
3150 except ImportError:
3152 except ImportError:
3151 slicechunk = getattr(revlog, '_slicechunk', None)
3153 slicechunk = getattr(revlog, '_slicechunk', None)
3152
3154
3153 size = r.length(rev)
3155 size = r.length(rev)
3154 chain = r._deltachain(rev)[0]
3156 chain = r._deltachain(rev)[0]
3155 if not getattr(r, '_withsparseread', False):
3157 if not getattr(r, '_withsparseread', False):
3156 slicedchain = (chain,)
3158 slicedchain = (chain,)
3157 else:
3159 else:
3158 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3160 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3159 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3161 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3160 rawchunks = getrawchunks(data, slicedchain)
3162 rawchunks = getrawchunks(data, slicedchain)
3161 bins = r._chunks(chain)
3163 bins = r._chunks(chain)
3162 text = bytes(bins[0])
3164 text = bytes(bins[0])
3163 bins = bins[1:]
3165 bins = bins[1:]
3164 text = mdiff.patches(text, bins)
3166 text = mdiff.patches(text, bins)
3165
3167
3166 benches = [
3168 benches = [
3167 (lambda: dorevision(), b'full'),
3169 (lambda: dorevision(), b'full'),
3168 (lambda: dodeltachain(rev), b'deltachain'),
3170 (lambda: dodeltachain(rev), b'deltachain'),
3169 (lambda: doread(chain), b'read'),
3171 (lambda: doread(chain), b'read'),
3170 ]
3172 ]
3171
3173
3172 if getattr(r, '_withsparseread', False):
3174 if getattr(r, '_withsparseread', False):
3173 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3175 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3174 benches.append(slicing)
3176 benches.append(slicing)
3175
3177
3176 benches.extend(
3178 benches.extend(
3177 [
3179 [
3178 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3180 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3179 (lambda: dodecompress(rawchunks), b'decompress'),
3181 (lambda: dodecompress(rawchunks), b'decompress'),
3180 (lambda: dopatch(text, bins), b'patch'),
3182 (lambda: dopatch(text, bins), b'patch'),
3181 (lambda: dohash(text), b'hash'),
3183 (lambda: dohash(text), b'hash'),
3182 ]
3184 ]
3183 )
3185 )
3184
3186
3185 timer, fm = gettimer(ui, opts)
3187 timer, fm = gettimer(ui, opts)
3186 for fn, title in benches:
3188 for fn, title in benches:
3187 timer(fn, title=title)
3189 timer(fn, title=title)
3188 fm.end()
3190 fm.end()
3189
3191
3190
3192
3191 @command(
3193 @command(
3192 b'perfrevset',
3194 b'perfrevset',
3193 [
3195 [
3194 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3196 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3195 (b'', b'contexts', False, b'obtain changectx for each revision'),
3197 (b'', b'contexts', False, b'obtain changectx for each revision'),
3196 ]
3198 ]
3197 + formatteropts,
3199 + formatteropts,
3198 b"REVSET",
3200 b"REVSET",
3199 )
3201 )
3200 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3202 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3201 """benchmark the execution time of a revset
3203 """benchmark the execution time of a revset
3202
3204
3203 Use the --clean option if need to evaluate the impact of build volatile
3205 Use the --clean option if need to evaluate the impact of build volatile
3204 revisions set cache on the revset execution. Volatile cache hold filtered
3206 revisions set cache on the revset execution. Volatile cache hold filtered
3205 and obsolete related cache."""
3207 and obsolete related cache."""
3206 opts = _byteskwargs(opts)
3208 opts = _byteskwargs(opts)
3207
3209
3208 timer, fm = gettimer(ui, opts)
3210 timer, fm = gettimer(ui, opts)
3209
3211
3210 def d():
3212 def d():
3211 if clear:
3213 if clear:
3212 repo.invalidatevolatilesets()
3214 repo.invalidatevolatilesets()
3213 if contexts:
3215 if contexts:
3214 for ctx in repo.set(expr):
3216 for ctx in repo.set(expr):
3215 pass
3217 pass
3216 else:
3218 else:
3217 for r in repo.revs(expr):
3219 for r in repo.revs(expr):
3218 pass
3220 pass
3219
3221
3220 timer(d)
3222 timer(d)
3221 fm.end()
3223 fm.end()
3222
3224
3223
3225
3224 @command(
3226 @command(
3225 b'perfvolatilesets',
3227 b'perfvolatilesets',
3226 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3228 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3227 + formatteropts,
3229 + formatteropts,
3228 )
3230 )
3229 def perfvolatilesets(ui, repo, *names, **opts):
3231 def perfvolatilesets(ui, repo, *names, **opts):
3230 """benchmark the computation of various volatile set
3232 """benchmark the computation of various volatile set
3231
3233
3232 Volatile set computes element related to filtering and obsolescence."""
3234 Volatile set computes element related to filtering and obsolescence."""
3233 opts = _byteskwargs(opts)
3235 opts = _byteskwargs(opts)
3234 timer, fm = gettimer(ui, opts)
3236 timer, fm = gettimer(ui, opts)
3235 repo = repo.unfiltered()
3237 repo = repo.unfiltered()
3236
3238
3237 def getobs(name):
3239 def getobs(name):
3238 def d():
3240 def d():
3239 repo.invalidatevolatilesets()
3241 repo.invalidatevolatilesets()
3240 if opts[b'clear_obsstore']:
3242 if opts[b'clear_obsstore']:
3241 clearfilecache(repo, b'obsstore')
3243 clearfilecache(repo, b'obsstore')
3242 obsolete.getrevs(repo, name)
3244 obsolete.getrevs(repo, name)
3243
3245
3244 return d
3246 return d
3245
3247
3246 allobs = sorted(obsolete.cachefuncs)
3248 allobs = sorted(obsolete.cachefuncs)
3247 if names:
3249 if names:
3248 allobs = [n for n in allobs if n in names]
3250 allobs = [n for n in allobs if n in names]
3249
3251
3250 for name in allobs:
3252 for name in allobs:
3251 timer(getobs(name), title=name)
3253 timer(getobs(name), title=name)
3252
3254
3253 def getfiltered(name):
3255 def getfiltered(name):
3254 def d():
3256 def d():
3255 repo.invalidatevolatilesets()
3257 repo.invalidatevolatilesets()
3256 if opts[b'clear_obsstore']:
3258 if opts[b'clear_obsstore']:
3257 clearfilecache(repo, b'obsstore')
3259 clearfilecache(repo, b'obsstore')
3258 repoview.filterrevs(repo, name)
3260 repoview.filterrevs(repo, name)
3259
3261
3260 return d
3262 return d
3261
3263
3262 allfilter = sorted(repoview.filtertable)
3264 allfilter = sorted(repoview.filtertable)
3263 if names:
3265 if names:
3264 allfilter = [n for n in allfilter if n in names]
3266 allfilter = [n for n in allfilter if n in names]
3265
3267
3266 for name in allfilter:
3268 for name in allfilter:
3267 timer(getfiltered(name), title=name)
3269 timer(getfiltered(name), title=name)
3268 fm.end()
3270 fm.end()
3269
3271
3270
3272
3271 @command(
3273 @command(
3272 b'perfbranchmap',
3274 b'perfbranchmap',
3273 [
3275 [
3274 (b'f', b'full', False, b'Includes build time of subset'),
3276 (b'f', b'full', False, b'Includes build time of subset'),
3275 (
3277 (
3276 b'',
3278 b'',
3277 b'clear-revbranch',
3279 b'clear-revbranch',
3278 False,
3280 False,
3279 b'purge the revbranch cache between computation',
3281 b'purge the revbranch cache between computation',
3280 ),
3282 ),
3281 ]
3283 ]
3282 + formatteropts,
3284 + formatteropts,
3283 )
3285 )
3284 def perfbranchmap(ui, repo, *filternames, **opts):
3286 def perfbranchmap(ui, repo, *filternames, **opts):
3285 """benchmark the update of a branchmap
3287 """benchmark the update of a branchmap
3286
3288
3287 This benchmarks the full repo.branchmap() call with read and write disabled
3289 This benchmarks the full repo.branchmap() call with read and write disabled
3288 """
3290 """
3289 opts = _byteskwargs(opts)
3291 opts = _byteskwargs(opts)
3290 full = opts.get(b"full", False)
3292 full = opts.get(b"full", False)
3291 clear_revbranch = opts.get(b"clear_revbranch", False)
3293 clear_revbranch = opts.get(b"clear_revbranch", False)
3292 timer, fm = gettimer(ui, opts)
3294 timer, fm = gettimer(ui, opts)
3293
3295
3294 def getbranchmap(filtername):
3296 def getbranchmap(filtername):
3295 """generate a benchmark function for the filtername"""
3297 """generate a benchmark function for the filtername"""
3296 if filtername is None:
3298 if filtername is None:
3297 view = repo
3299 view = repo
3298 else:
3300 else:
3299 view = repo.filtered(filtername)
3301 view = repo.filtered(filtername)
3300 if util.safehasattr(view._branchcaches, '_per_filter'):
3302 if util.safehasattr(view._branchcaches, '_per_filter'):
3301 filtered = view._branchcaches._per_filter
3303 filtered = view._branchcaches._per_filter
3302 else:
3304 else:
3303 # older versions
3305 # older versions
3304 filtered = view._branchcaches
3306 filtered = view._branchcaches
3305
3307
3306 def d():
3308 def d():
3307 if clear_revbranch:
3309 if clear_revbranch:
3308 repo.revbranchcache()._clear()
3310 repo.revbranchcache()._clear()
3309 if full:
3311 if full:
3310 view._branchcaches.clear()
3312 view._branchcaches.clear()
3311 else:
3313 else:
3312 filtered.pop(filtername, None)
3314 filtered.pop(filtername, None)
3313 view.branchmap()
3315 view.branchmap()
3314
3316
3315 return d
3317 return d
3316
3318
3317 # add filter in smaller subset to bigger subset
3319 # add filter in smaller subset to bigger subset
3318 possiblefilters = set(repoview.filtertable)
3320 possiblefilters = set(repoview.filtertable)
3319 if filternames:
3321 if filternames:
3320 possiblefilters &= set(filternames)
3322 possiblefilters &= set(filternames)
3321 subsettable = getbranchmapsubsettable()
3323 subsettable = getbranchmapsubsettable()
3322 allfilters = []
3324 allfilters = []
3323 while possiblefilters:
3325 while possiblefilters:
3324 for name in possiblefilters:
3326 for name in possiblefilters:
3325 subset = subsettable.get(name)
3327 subset = subsettable.get(name)
3326 if subset not in possiblefilters:
3328 if subset not in possiblefilters:
3327 break
3329 break
3328 else:
3330 else:
3329 assert False, b'subset cycle %s!' % possiblefilters
3331 assert False, b'subset cycle %s!' % possiblefilters
3330 allfilters.append(name)
3332 allfilters.append(name)
3331 possiblefilters.remove(name)
3333 possiblefilters.remove(name)
3332
3334
3333 # warm the cache
3335 # warm the cache
3334 if not full:
3336 if not full:
3335 for name in allfilters:
3337 for name in allfilters:
3336 repo.filtered(name).branchmap()
3338 repo.filtered(name).branchmap()
3337 if not filternames or b'unfiltered' in filternames:
3339 if not filternames or b'unfiltered' in filternames:
3338 # add unfiltered
3340 # add unfiltered
3339 allfilters.append(None)
3341 allfilters.append(None)
3340
3342
3341 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3343 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3342 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3344 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3343 branchcacheread.set(classmethod(lambda *args: None))
3345 branchcacheread.set(classmethod(lambda *args: None))
3344 else:
3346 else:
3345 # older versions
3347 # older versions
3346 branchcacheread = safeattrsetter(branchmap, b'read')
3348 branchcacheread = safeattrsetter(branchmap, b'read')
3347 branchcacheread.set(lambda *args: None)
3349 branchcacheread.set(lambda *args: None)
3348 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3350 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3349 branchcachewrite.set(lambda *args: None)
3351 branchcachewrite.set(lambda *args: None)
3350 try:
3352 try:
3351 for name in allfilters:
3353 for name in allfilters:
3352 printname = name
3354 printname = name
3353 if name is None:
3355 if name is None:
3354 printname = b'unfiltered'
3356 printname = b'unfiltered'
3355 timer(getbranchmap(name), title=str(printname))
3357 timer(getbranchmap(name), title=str(printname))
3356 finally:
3358 finally:
3357 branchcacheread.restore()
3359 branchcacheread.restore()
3358 branchcachewrite.restore()
3360 branchcachewrite.restore()
3359 fm.end()
3361 fm.end()
3360
3362
3361
3363
3362 @command(
3364 @command(
3363 b'perfbranchmapupdate',
3365 b'perfbranchmapupdate',
3364 [
3366 [
3365 (b'', b'base', [], b'subset of revision to start from'),
3367 (b'', b'base', [], b'subset of revision to start from'),
3366 (b'', b'target', [], b'subset of revision to end with'),
3368 (b'', b'target', [], b'subset of revision to end with'),
3367 (b'', b'clear-caches', False, b'clear cache between each runs'),
3369 (b'', b'clear-caches', False, b'clear cache between each runs'),
3368 ]
3370 ]
3369 + formatteropts,
3371 + formatteropts,
3370 )
3372 )
3371 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3373 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3372 """benchmark branchmap update from for <base> revs to <target> revs
3374 """benchmark branchmap update from for <base> revs to <target> revs
3373
3375
3374 If `--clear-caches` is passed, the following items will be reset before
3376 If `--clear-caches` is passed, the following items will be reset before
3375 each update:
3377 each update:
3376 * the changelog instance and associated indexes
3378 * the changelog instance and associated indexes
3377 * the rev-branch-cache instance
3379 * the rev-branch-cache instance
3378
3380
3379 Examples:
3381 Examples:
3380
3382
3381 # update for the one last revision
3383 # update for the one last revision
3382 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3384 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3383
3385
3384 $ update for change coming with a new branch
3386 $ update for change coming with a new branch
3385 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3387 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3386 """
3388 """
3387 from mercurial import branchmap
3389 from mercurial import branchmap
3388 from mercurial import repoview
3390 from mercurial import repoview
3389
3391
3390 opts = _byteskwargs(opts)
3392 opts = _byteskwargs(opts)
3391 timer, fm = gettimer(ui, opts)
3393 timer, fm = gettimer(ui, opts)
3392 clearcaches = opts[b'clear_caches']
3394 clearcaches = opts[b'clear_caches']
3393 unfi = repo.unfiltered()
3395 unfi = repo.unfiltered()
3394 x = [None] # used to pass data between closure
3396 x = [None] # used to pass data between closure
3395
3397
3396 # we use a `list` here to avoid possible side effect from smartset
3398 # we use a `list` here to avoid possible side effect from smartset
3397 baserevs = list(scmutil.revrange(repo, base))
3399 baserevs = list(scmutil.revrange(repo, base))
3398 targetrevs = list(scmutil.revrange(repo, target))
3400 targetrevs = list(scmutil.revrange(repo, target))
3399 if not baserevs:
3401 if not baserevs:
3400 raise error.Abort(b'no revisions selected for --base')
3402 raise error.Abort(b'no revisions selected for --base')
3401 if not targetrevs:
3403 if not targetrevs:
3402 raise error.Abort(b'no revisions selected for --target')
3404 raise error.Abort(b'no revisions selected for --target')
3403
3405
3404 # make sure the target branchmap also contains the one in the base
3406 # make sure the target branchmap also contains the one in the base
3405 targetrevs = list(set(baserevs) | set(targetrevs))
3407 targetrevs = list(set(baserevs) | set(targetrevs))
3406 targetrevs.sort()
3408 targetrevs.sort()
3407
3409
3408 cl = repo.changelog
3410 cl = repo.changelog
3409 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3411 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3410 allbaserevs.sort()
3412 allbaserevs.sort()
3411 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3413 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3412
3414
3413 newrevs = list(alltargetrevs.difference(allbaserevs))
3415 newrevs = list(alltargetrevs.difference(allbaserevs))
3414 newrevs.sort()
3416 newrevs.sort()
3415
3417
3416 allrevs = frozenset(unfi.changelog.revs())
3418 allrevs = frozenset(unfi.changelog.revs())
3417 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3419 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3418 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3420 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3419
3421
3420 def basefilter(repo, visibilityexceptions=None):
3422 def basefilter(repo, visibilityexceptions=None):
3421 return basefilterrevs
3423 return basefilterrevs
3422
3424
3423 def targetfilter(repo, visibilityexceptions=None):
3425 def targetfilter(repo, visibilityexceptions=None):
3424 return targetfilterrevs
3426 return targetfilterrevs
3425
3427
3426 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3428 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3427 ui.status(msg % (len(allbaserevs), len(newrevs)))
3429 ui.status(msg % (len(allbaserevs), len(newrevs)))
3428 if targetfilterrevs:
3430 if targetfilterrevs:
3429 msg = b'(%d revisions still filtered)\n'
3431 msg = b'(%d revisions still filtered)\n'
3430 ui.status(msg % len(targetfilterrevs))
3432 ui.status(msg % len(targetfilterrevs))
3431
3433
3432 try:
3434 try:
3433 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3435 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3434 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3436 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3435
3437
3436 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3438 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3437 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3439 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3438
3440
3439 # try to find an existing branchmap to reuse
3441 # try to find an existing branchmap to reuse
3440 subsettable = getbranchmapsubsettable()
3442 subsettable = getbranchmapsubsettable()
3441 candidatefilter = subsettable.get(None)
3443 candidatefilter = subsettable.get(None)
3442 while candidatefilter is not None:
3444 while candidatefilter is not None:
3443 candidatebm = repo.filtered(candidatefilter).branchmap()
3445 candidatebm = repo.filtered(candidatefilter).branchmap()
3444 if candidatebm.validfor(baserepo):
3446 if candidatebm.validfor(baserepo):
3445 filtered = repoview.filterrevs(repo, candidatefilter)
3447 filtered = repoview.filterrevs(repo, candidatefilter)
3446 missing = [r for r in allbaserevs if r in filtered]
3448 missing = [r for r in allbaserevs if r in filtered]
3447 base = candidatebm.copy()
3449 base = candidatebm.copy()
3448 base.update(baserepo, missing)
3450 base.update(baserepo, missing)
3449 break
3451 break
3450 candidatefilter = subsettable.get(candidatefilter)
3452 candidatefilter = subsettable.get(candidatefilter)
3451 else:
3453 else:
3452 # no suitable subset where found
3454 # no suitable subset where found
3453 base = branchmap.branchcache()
3455 base = branchmap.branchcache()
3454 base.update(baserepo, allbaserevs)
3456 base.update(baserepo, allbaserevs)
3455
3457
3456 def setup():
3458 def setup():
3457 x[0] = base.copy()
3459 x[0] = base.copy()
3458 if clearcaches:
3460 if clearcaches:
3459 unfi._revbranchcache = None
3461 unfi._revbranchcache = None
3460 clearchangelog(repo)
3462 clearchangelog(repo)
3461
3463
3462 def bench():
3464 def bench():
3463 x[0].update(targetrepo, newrevs)
3465 x[0].update(targetrepo, newrevs)
3464
3466
3465 timer(bench, setup=setup)
3467 timer(bench, setup=setup)
3466 fm.end()
3468 fm.end()
3467 finally:
3469 finally:
3468 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3470 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3469 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3471 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3470
3472
3471
3473
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix: help text previously read "brachmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only enumerate the on-disk branchmap cache files and
    their sizes.  Otherwise, time `branchmap...fromfile()` for the
    requested repoview filter (unfiltered when no --filter is given),
    falling back along the filter subset chain until a cached file is
    found.  Aborts if no usable branchmap cache exists.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just report cached branchmap files; no timing in this mode
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions exposed a module-level read() instead
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # subset chain until some filter level has an on-disk cache
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        # optionally drop revlog caches so each run re-reads from disk
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3530
3532
3531
3533
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def load():
        # constructing the obsstore parses all on-disk markers; len()
        # forces the lazy data and reports how many were read
        return len(obsolete.obsstore(svfs))

    timer(load)
    fm.end()
3541
3543
3542
3544
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark util.lrucachedict operations

    Times cache construction, gets, inserts, sets, and a randomized mix
    of gets and sets.  When --costlimit is non-zero, the cost-aware
    variants (``insert(..., cost=...)`` against a ``maxcost``-bounded
    cache) are benchmarked instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    # construction cost: build many caches and throw them away
    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # the key population inserted into the cache; random ints so keys are
    # effectively unique
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    # like dogets(), but with a total-cost bound; lookups may miss because
    # costly entries can have been evicted, hence the KeyError handling
    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        # op 0 is a get, op 1 is a set; --mixedgetfreq controls the ratio
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain benchmarks are mutually exclusive modes
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    # one timer/formatter per benchmark so each reports under its own title
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3697
3699
3698
3700
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # hoist the loop invariants so the timed loop measures write() itself
    iterations = 100000
    line = b'Testing write performance\n'

    def write():
        for _ in range(iterations):
            ui.writenoi18n(line)

    timer(write)
    fm.end()
3713
3715
3714
3716
def uisetup(ui):
    """extension setup hook: install compatibility shims for old Mercurial

    This extension is meant to run against historical Mercurial versions,
    so APIs whose behavior silently differs there are wrapped here.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # abort explicitly instead of letting --dir be silently
            # ignored on versions whose openrevlog() predates it
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3733
3735
3734
3736
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # drive one progress bar from zero up to `total`, one tick at a
        # time; the context manager finalizes the bar when the run ends
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now