##// END OF EJS Templates
perf: add a --stats argument to perfhelper-pathcopies...
marmoute -
r43212:adac17fa default
parent child Browse files
Show More
@@ -1,3228 +1,3273 b''
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median average. If not set only the best timing is reported
  (default: off).

``presleep``
  number of second to wait before any group of runs (default: 1)

``pre-run``
  number of run to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If benchmark has been running for <time> seconds, and we have performed
      <numberofrun> iterations, stop the benchmark,

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    # registrar is probed with safehasattr() later, so a sentinel is
    # needed here (unlike the modules above, which are simply absent).
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil # since 5.0
except ImportError:
    # probed for None in getbranchmapsubsettable(), hence the sentinel
    repoviewutil = None
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    # gettimer() checks "profiling is not None" before enabling the
    # perf.profile-benchmark feature
    profiling = None
120
120
def identity(a):
    """Return *a* unchanged.

    Used as a no-op stand-in for py2/py3 conversion helpers (byteskwargs,
    fsencode, ...) when the running Mercurial does not provide them.
    """
    result = a
    return result
123
123
# for "historical portability":
# bind py2/py3 helpers from pycompat when available, otherwise fall back
# to py2-only equivalents (the fallback branch assumes Python 2).
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
164
164
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    """Return True if `thing` has attribute `attr` (given as bytes).

    Unlike a truthiness test, this is accurate even when the attribute
    value is None or otherwise falsy.
    """
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
172
172
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str; comparing it to b'nt' can
    # only match on Python 2. On Python 3 perf_counter exists, so the
    # first branch is taken anyway -- confirm before relying on this.
    util.timer = time.clock
else:
    util.timer = time.time
182
182
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))

# command table populated by the @command decorator defined below
cmdtable = {}
204
204
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command declaration such as b'name|alias1|alias2'.

    Returns the list of names (bytes); the first entry is the primary
    command name.
    """
    names = cmd.split(b"|")
    return names
210
210
# Select the most capable @command decorator the running Mercurial
# provides, falling back to progressively older interfaces.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # cmdtable entries are (func, options) or
            # (func, options, synopsis) tuples, matching dispatch's format
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
238
238
# Register the perf.* config knobs with configitem when the running
# Mercurial supports it; silently skip on versions that predate the
# registrar/configitems machinery.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
               experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (configitem() there rejects the experimental= keyword, so
    # re-register everything without it)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
               default=mercurial.configitems.dynamicdefault,
    )
296
296
def getlen(ui):
    """Return a length function for benchmark sizing.

    In stub mode (perf.stub) every collection reports length 1 so that
    benchmarks complete quickly during testing; otherwise the builtin
    len is returned unchanged.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    if not stubbed:
        return len

    def _alwaysone(x):
        return 1
    return _alwaysone
301
301
class noop(object):
    """Context manager that does nothing.

    Used as a stand-in where a real context manager (e.g. a profiler)
    is optional; entering and exiting have no effect and exceptions
    propagate normally.
    """

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None

# shared do-nothing instance used throughout the timing helpers
NOOPCTX = noop()
310
310
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # Each entry is "<seconds>-<mincount>"; malformed entries are warned
    # about and skipped rather than aborting the benchmark.
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # (only honored when the profiling module imported successfully)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
410
410
def stub_timer(fm, func, setup=None, title=None):
    """Degenerate replacement for _timer used when perf.stub is set.

    Runs the optional `setup` callable and then `func` exactly once,
    without measuring or reporting anything; `fm` and `title` are
    accepted only for signature compatibility with _timer.
    """
    needs_setup = setup is not None
    if needs_setup:
        setup()
    func()
415
415
@contextlib.contextmanager
def timeone():
    """Time the enclosed block once.

    Yields a list that, after the block exits, contains a single
    (wallclock, user-cpu, system-cpu) tuple measured around the block.
    """
    r = []
    # os.times() is sampled outside util.timer() so the CPU figures
    # bracket the wallclock measurement
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() -> (user, system, ...); deltas give CPU spent in the block
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
426
426
427
427
# list of stop condition (elapsed time, minimal run count)
# checked in order by _timer: after 3s stop once 100 runs are done,
# after 10s stop once 3 runs are done
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
433
433
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Benchmark `func` and report the timings through formatter `fm`.

    `setup`, if given, runs before every invocation of `func` (timed
    runs and the `prerun` warm-up runs alike). Only the first timed
    iteration runs under `profiler`; subsequent iterations use the
    no-op context. Iteration stops at the first (elapsed, mincount)
    pair in `limits` that is satisfied.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs, not measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the last iteration's return value, shown as "! result: ..."
    formatone(fm, results, title=title, result=r,
              displayall=displayall)
466
466
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter `fm`.

    `timings` is a list of (wall, user, sys) tuples; it is sorted in
    place. The best run is always reported; with `displayall`, max,
    average and median rows are added. `result`, if truthy, is echoed
    as the benchmark's return value.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, entry):
        # only non-"best" rows get a role prefix on their field names
        prefix = b'%s.' % role if role != b'best' else b''
        wall, user, system = entry[0], entry[1], entry[2]
        fm.plain(b'!')
        for suffix, fmt, value in [
            (b'wall', b' wall %f', wall),
            (b'comb', b' comb %f', user + system),
            (b'user', b' user %f', user),
            (b'sys', b' sys %f', system),
        ]:
            fm.write(prefix + suffix, fmt, value)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple([sum(col) / count for col in zip(*timings)]))
        _show(b'median', timings[len(timings) // 2])
498
498
499 # utilities for historical portability
499 # utilities for historical portability
500
500
def getint(ui, section, name, default):
    """Read config `section.name` as an integer, or return `default`.

    ui.configint is deliberately avoided for "historical portability":
    it has only been available since 1.9 (or fa2b596db182).

    Raises error.ConfigError when the configured value is present but
    not a valid integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        value = int(raw)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, raw))
    return value
512
512
def safeattrsetter(obj, name, ignoremissing=False):
    """Return a handle to (re)assign attribute `name` (bytes) of `obj`.

    Aborts when `obj` lacks the attribute, so that a future removal of
    an attribute this extension monkeypatches cannot silently invalidate
    a measurement. With `ignoremissing=True` a missing attribute yields
    None instead, which is useful for attributes that only exist in some
    Mercurial versions.

    The returned object offers:
      set(newvalue) -- assign a new value to the attribute
      restore()     -- put back the value captured at call time
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    attrname = _sysstr(name)
    saved = getattr(obj, attrname)

    class _accessor(object):
        def set(self, newvalue):
            setattr(obj, attrname, newvalue)

        def restore(self):
            setattr(obj, attrname, saved)

    return _accessor()
542
542
543 # utilities to examine each internal API changes
543 # utilities to examine each internal API changes
544
544
def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    # so probe the candidate modules from newest home to oldest
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
561
561
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older versions exposed the same thing as 'sopener'
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
572
572
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older versions exposed the same thing as 'opener'
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
583
583
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    # attribute names must be native strs: on Python 3, getattr() rejects
    # bytes and vars() keys are str, so b'_tagscache' could never match
    # (on Python 2, bytes and str are the same type, so behavior is
    # unchanged there)
    if util.safehasattr(repo, r'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if r'_tagscache' in vars(repo):
                del repo.__dict__[r'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
612
612
613 # utilities to clear cache
613 # utilities to clear cache
614
614
def clearfilecache(obj, attrname):
    # Invalidate a @filecache'd property so its next access recomputes:
    # filtered repo views keep the real cache on the unfiltered repo.
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
622
622
def clearchangelog(repo):
    # a filtered repo caches its own view of the changelog separately
    # from the unfiltered repo's filecache entry; reset both layers
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
628
628
629 # perf commands
629 # perf commands
630
630
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # time a full dirstate walk (including unknown files) over the
    # working copy
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    def walk():
        entries = repo.dirstate.walk(m, subrepos=[], unknown=True,
                                     ignored=False)
        return len(list(entries))
    timer(walk)
    fm.end()
639
639
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # time annotating file 'f' as of the working copy's parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    def annotate():
        return len(fc.annotate(True))
    timer(annotate)
    fm.end()
647
647
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    # time computing the working-copy status, optionally including
    # the (more expensive) unknown-file scan
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']
    def status():
        return sum(map(len, repo.status(unknown=unknown)))
    timer(status)
    fm.end()
659
659
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # time a dry-run addremove over the whole working copy, silencing
    # per-file output so printing does not dominate the measurement
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # newer scmutil.addremove grew an extra uipathfn argument
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            def run():
                return scmutil.addremove(repo, matcher, b"", uipathfn, opts)
        else:
            def run():
                return scmutil.addremove(repo, matcher, b"", opts)
        timer(run)
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
677
677
def clearcaches(cl):
    # behave somewhat consistently across internal API changes.
    # Attribute names must be native strs: on Python 3, getattr()
    # (inside util.safehasattr) rejects bytes names; raw strs are
    # identical to the bytes literals on Python 2.
    if util.safehasattr(cl, r'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, r'_nodecache'):
        # older revlogs expose the node->rev cache directly
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
686
686
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def setup():
        # drop cached data so every run recomputes from scratch
        clearcaches(cl)
    def heads():
        return len(cl.headrevs())
    timer(heads, setup=setup)
    fm.end()
699
699
@command(b'perftags', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    # benchmark the tags cache computation, optionally forcing the
    # changelog and manifest to be re-read on each run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def gettags():
        return len(repo.tags())
    timer(gettags, setup=setup)
    fm.end()
718
718
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # time walking the ancestors of all current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def walkancestors():
        # exhausting the iterator is the work being measured
        for _rev in repo.changelog.ancestors(heads):
            pass
    timer(walkancestors)
    fm.end()
729
729
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # time membership tests of REVSET's revisions against the lazy
    # ancestor set of all heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def membership():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # the containment check itself is what we measure
            rev in s
    timer(membership)
    fm.end()
742
742
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # NOTE(review): unlike sibling commands, opts is not run through
    # _byteskwargs() here — confirm whether formatter options are honored
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def setup():
        # open a fresh peer for every run so no discovery state is reused
        repos[1] = hg.peer(ui, opts, path)
    def discover():
        setdiscovery.findcommonheads(ui, *repos)
    timer(discover, setup=setup)
    fm.end()
757
757
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def parsebookmarks():
        # attribute access alone triggers the (re)parse
        repo._bookmarks
    timer(parsebookmarks, setup=setup)
    fm.end()
776
776
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # open() modes must be native strs: Python 3 rejects a bytes mode
    # argument with TypeError ('rb' is identical to b'rb' on Python 2)

    def makebench(fn):
        # benchmark fn() against a freshly-opened bundle
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # benchmark draining the bundle stream in `size`-byte reads
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle machinery at all
        def run():
            with open(bundlepath, 'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # drain each bundle2 part in `size`-byte reads
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, 'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
894
894
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    revs = repo.revs(rev or b'all()')
    nodes = [cl.lookup(r) for r in revs]
    bundler = changegroup.getbundler(cgversion, repo)

    def generate():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # exhaust the chunk generator; generating is the measured work
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(generate)

    fm.end()
925
925
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # time rebuilding the dirstate's directory map
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to be loaded up front
    def builddirs():
        dirstate.hasdir(b'a')
        # drop the cached directory map so each run rebuilds it
        del dirstate._map._dirs
    timer(builddirs)
    fm.end()
937
937
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    # time loading the dirstate from disk
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm things up before timing
    def load():
        # invalidate first so each run re-reads the dirstate file
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(load)
    fm.end()
948
948
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    # time the dirstate's hasdir() with a cold directory map
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # make sure the dirstate is loaded
    def hasdir():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory map so each run starts cold
        del repo.dirstate._map._dirs
    timer(hasdir)
    fm.end()
959
959
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    # time building the file case-folding map of the dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # make sure the dirstate is loaded
    def buildfoldmap():
        dirstate._map.filefoldmap.get(b'a')
        # deleting the cached property forces a rebuild next run
        del dirstate._map.filefoldmap
    timer(buildfoldmap)
    fm.end()
971
971
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    # time building the directory case-folding map of the dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # make sure the dirstate is loaded
    def buildfoldmap():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached maps so each run rebuilds from scratch
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(buildfoldmap)
    fm.end()
984
984
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    # time serializing the dirstate back to disk
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # ensure the dirstate is read in before timing
    def write():
        # mark dirty so write() actually serializes
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(write)
    fm.end()
996
996
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        baserev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[baserev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1018
1018
@command(b'perfmergecalculate',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    # time computing the action list for a merge, without applying it
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def calculate():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(calculate)
    fm.end()
1037
1037
@command(b'perfmergecopies',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)
    def runmergecopies():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)
    timer(runmergecopies)
    fm.end()
1055
1055
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints once so only pathcopies itself is timed
    oldctx = scmutil.revsingle(repo, rev1, rev1)
    newctx = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(oldctx, newctx))
    fm.end()
1067
1067
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            # with --full, also drop the cached object so the phase roots
            # file is re-read from disk on every run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
1086
1086
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server

    DEST defaults to the `default-push` (or `default`) configured path.
    The command first reports some context (publishing state, number of
    remote phase roots) and then times `phases.remotephasessummary`.
    """
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # items() instead of iteritems(): dicts have no iteritems() on Python 3
    # and the listkeys result is small, so the py2 list copy is harmless
    for nhex, phase in remotephases.items():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        # only count roots we actually know locally and that are non-public
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1142
1142
@command(b'perfmanifest',[
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset: resolve it to its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hexadecimal manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage is the modern API; fall back to the private
                # _revlog attribute when benchmarking against older hg
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # start from a cold cache on every run (--clear-disk also drops
        # persisted data)
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1178
1178
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading the changelog entry of a single revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
1189
1189
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached matcher so every run re-reads the ignore files
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # plain attribute access triggers (re)building the ignore matcher
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1206
1206
@command(b'perfindex', [
         (b'', b'rev', [], b'revision to be looked up (default tip)'),
         (b'', b'no-lookup', None, b'do not revision lookup post creation'),
        ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # after _byteskwargs() all option keys are bytes: a native 'rev'
        # key would raise KeyError on Python 3, so use b'rev' here
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1260
1260
@command(b'perfnodemap', [
         (b'', b'rev', [], b'revision to be looked up (default tip)'),
         (b'', b'clear-caches', True, b'clear revlog cache between calls'),
        ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # after _byteskwargs() all option keys are bytes: a native str key
    # would raise KeyError on Python 3, so use b'clear_caches'
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1319
1319
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a fresh `hg` process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # clear HGRCPATH inline so user configuration does not skew the
            # measured startup
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            # Windows shell has no inline VAR= syntax; set the environment
            # explicitly (inherited by the child) and discard output in NUL
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1333
1333
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the node identifiers up front, outside the timed section
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for node in nodes:
            repo.changelog.parents(node)
    timer(d)
    fm.end()
1357
1357
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files list of one changectx"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
1367
1367
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files list from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # index 3 of the parsed changelog entry holds the files list
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
1378
1378
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        return len(repo.lookup(rev))
    timer(d)
    fm.end()
1385
1385
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation replays the exact same edit script
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # pick a random existing range [a1, a2) to replace...
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        # ...by a random new range [b1, b2)
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        # building the linelog is the timed operation
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1419
1419
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the function once so the attribute lookup stays out of the timing
    revrange = scmutil.revrange
    def d():
        return len(revrange(repo, specs))
    timer(d)
    fm.end()
1427
1427
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node-to-revision lookup in a cold changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open a private changelog instance so the repo's caches are untouched
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop the revlog caches so every run starts cold
        clearcaches(cl)
    timer(d)
    fm.end()
1441
1441
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark the `hg log` command, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # swallow the log output so printing does not dominate the timing
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1455
1455
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # reading the branch forces the changelog entry itself to be
            # loaded, not just the index
            repo[rev].branch()
    timer(moonwalk)
    fm.end()
1470
1470
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a throw-away ui writing to /dev/null so terminal output
    # and the pager do not pollute the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1504
1504
1505 def _displaystats(ui, opts, entries, data):
1505 def _displaystats(ui, opts, entries, data):
1506 pass
1506 pass
1507 # use a second formatter because the data are quite different, not sure
1507 # use a second formatter because the data are quite different, not sure
1508 # how it flies with the templater.
1508 # how it flies with the templater.
1509 fm = ui.formatter(b'perf-stats', opts)
1509 fm = ui.formatter(b'perf-stats', opts)
1510 for key, title in entries:
1510 for key, title in entries:
1511 values = data[key]
1511 values = data[key]
1512 nbvalues = len(data)
1512 nbvalues = len(data)
1513 values.sort()
1513 values.sort()
1514 stats = {
1514 stats = {
1515 'key': key,
1515 'key': key,
1516 'title': title,
1516 'title': title,
1517 'nbitems': len(values),
1517 'nbitems': len(values),
1518 'min': values[0][0],
1518 'min': values[0][0],
1519 '10%': values[(nbvalues * 10) // 100][0],
1519 '10%': values[(nbvalues * 10) // 100][0],
1520 '25%': values[(nbvalues * 25) // 100][0],
1520 '25%': values[(nbvalues * 25) // 100][0],
1521 '50%': values[(nbvalues * 50) // 100][0],
1521 '50%': values[(nbvalues * 50) // 100][0],
1522 '75%': values[(nbvalues * 75) // 100][0],
1522 '75%': values[(nbvalues * 75) // 100][0],
1523 '80%': values[(nbvalues * 80) // 100][0],
1523 '80%': values[(nbvalues * 80) // 100][0],
1524 '85%': values[(nbvalues * 85) // 100][0],
1524 '85%': values[(nbvalues * 85) // 100][0],
1525 '90%': values[(nbvalues * 90) // 100][0],
1525 '90%': values[(nbvalues * 90) // 100][0],
1526 '95%': values[(nbvalues * 95) // 100][0],
1526 '95%': values[(nbvalues * 95) // 100][0],
1527 '99%': values[(nbvalues * 99) // 100][0],
1527 '99%': values[(nbvalues * 99) // 100][0],
1528 'max': values[-1][0],
1528 'max': values[-1][0],
1529 }
1529 }
1530 fm.startitem()
1530 fm.startitem()
1531 fm.data(**stats)
1531 fm.data(**stats)
1532 # make node pretty for the human output
1532 # make node pretty for the human output
1533 fm.plain('### %s (%d items)\n' % (title, len(values)))
1533 fm.plain('### %s (%d items)\n' % (title, len(values)))
1534 lines = [
1534 lines = [
1535 'min',
1535 'min',
1536 '10%',
1536 '10%',
1537 '25%',
1537 '25%',
1538 '50%',
1538 '50%',
1539 '75%',
1539 '75%',
1540 '80%',
1540 '80%',
1541 '85%',
1541 '85%',
1542 '90%',
1542 '90%',
1543 '95%',
1543 '95%',
1544 '99%',
1544 '99%',
1545 'max',
1545 'max',
1546 ]
1546 ]
1547 for l in lines:
1547 for l in lines:
1548 fm.plain('%s: %s\n' % (l, stats[l]))
1548 fm.plain('%s: %s\n' % (l, stats[l]))
1549 fm.end()
1549 fm.end()
1550
1550
@command(b'perfhelper-mergecopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          (b'', b'stats', False, b'provides statistic about the measured data'),
         ])
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column header, %-format) pairs; timing/rename columns are dropped
    # below when --timing is not requested.
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
        ]
    if not dotiming:
        output_template = [i for i in output_template
                           if not ('time' in i[0] or 'renames' in i[0])]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant: they are the ones with a base and two
    # parents to trace copies between.
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append((
                        data['p1.nbrevs'],
                        b.hex(),
                        p1.hex()
                    ))
                    alldata['nbmissingfiles'].append((
                        data['p1.nbmissingfiles'],
                        b.hex(),
                        p1.hex()
                    ))
                if p2missing:
                    alldata['nbrevs'].append((
                        data['p2.nbrevs'],
                        b.hex(),
                        p2.hex()
                    ))
                    alldata['nbmissingfiles'].append((
                        data['p2.nbmissingfiles'],
                        b.hex(),
                        p2.hex()
                    ))
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # FIX: stop the clock *before* computing the delta; the
                # previous code read the stale `end` left over from the p1
                # measurement, making p2.time meaningless.
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append((
                            data['p1.renamedfiles'],
                            b.hex(),
                            p1.hex()
                        ))
                        alldata['parenttime'].append((
                            data['p1.time'],
                            b.hex(),
                            p1.hex()
                        ))
                    if p2missing:
                        alldata['parentnbrenames'].append((
                            data['p2.renamedfiles'],
                            b.hex(),
                            p2.hex()
                        ))
                        alldata['parenttime'].append((
                            data['p2.time'],
                            b.hex(),
                            p2.hex()
                        ))
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append((
                            data['nbrenamedfiles'],
                            b.hex(),
                            p1.hex(),
                            p2.hex()
                        ))
                        alldata['totaltime'].append((
                            data['time'],
                            b.hex(),
                            p1.hex(),
                            p2.hex()
                        ))
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('parentnbrenames',
                            'rename from one parent to base'))
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
1742
1742
1743
1743
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          (b'', b'stats', False, b'provides statistic about the measured data'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits provide interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                # FIX: only collect statistics when --stats was requested;
                # `alldata` is otherwise undefined and appending to it raised
                # a NameError on the first merge revision.
                if dostats:
                    alldata['nbrevs'].append((
                        data['nbrevs'],
                        base.hex(),
                        parent.hex(),
                    ))
                    alldata['nbmissingfiles'].append((
                        data['nbmissingfiles'],
                        base.hex(),
                        parent.hex(),
                    ))
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    # FIX: same guard for the timing statistics.
                    if dostats:
                        alldata['time'].append((
                            data['time'],
                            base.hex(),
                            parent.hex(),
                        ))
                        alldata['nbrenames'].append((
                            data['nbrenamedfiles'],
                            base.hex(),
                            parent.hex(),
                        ))
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames',
                            'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
1819
1864
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # Benchmark building a case-collision auditor over the current dirstate.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(d)
    fm.end()
1826
1871
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # Benchmark reading and parsing the fncache file from the store.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
1836
1881
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # Benchmark serializing the fncache inside a backed-up transaction,
    # holding the repository lock for the duration of the run.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # mark dirty on every run so each iteration really writes
        store.fncache._dirty = True
        store.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1853
1898
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # Benchmark path-encoding every entry currently held by the fncache.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def d():
        for path in store.fncache.entries:
            store.encode(path)
    timer(d)
    fm.end()
1865
1910
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop for threaded perfbdiff runs.

    Pulls text pairs off ``q`` and diffs them; a ``None`` item ends the
    current batch, after which the worker parks on the ``ready`` condition
    until the next timed run (or until ``done`` is set).
    """
    while not done.is_set():
        job = q.get()
        while job is not None:
            # diff flavour mirrors the command options: xdiff > blocks > text
            if xdiff:
                mdiff.bdiff.xdiffblocks(*job)
            elif blocks:
                mdiff.bdiff.blocks(*job)
            else:
                mdiff.textdiff(*job)
            q.task_done()
            job = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1881
1926
def _manifestrevision(repo, mnode):
    """Return the raw revision text of manifest node ``mnode``."""
    manifestlog = repo.manifestlog

    # newer hg exposes manifest storage via getstorage(); fall back to the
    # private _revlog attribute on older versions
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
1891
1936
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # positional handling: with -c/-m the first argument is the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    rlog = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = rlog.rev(rlog.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(rlog) - 1)):
        if opts[b'alldata']:
            # Collect manifest texts for the changeset against each parent.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for parentctx in ctx.parents():
                pman = _manifestrevision(repo, parentctx.manifestnode())
                textpairs.append((pman, mtext))

            # Walk the manifest delta to gather filelog revision pairs.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                old = fctx.revision(change[0][0] or -1)
                new = fctx.revision(change[1][0] or -1)
                textpairs.append((old, new))
        else:
            deltarev = rlog.deltaparent(rev)
            textpairs.append((rlog.revision(deltarev), rlog.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        # single-threaded: diff every pair inline
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded: workers pull pairs off a queue; one None per worker ends
        # a batch and `ready` releases them for the next timed run
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # tear the worker threads down
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1992
2037
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # positional handling: with -c/-m the first argument is the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    rlog = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = rlog.rev(rlog.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(rlog) - 1)):
        if opts[b'alldata']:
            # Collect manifest texts for the changeset against each parent.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for parentctx in ctx.parents():
                pman = _manifestrevision(repo, parentctx.manifestnode())
                textpairs.append((pman, mtext))

            # Walk the manifest delta to gather filelog revision pairs.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                old = fctx.revision(change[0][0] or -1)
                new = fctx.revision(change[1][0] or -1)
                textpairs.append((old, new))
        else:
            deltarev = rlog.deltaparent(rev)
            textpairs.append((rlog.revision(deltarev), rlog.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2058
2103
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # short flag -> diff option name used by commands.diff
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # benchmark each combination of whitespace-related diff options
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {flagnames[flag]: b'1' for flag in diffopt}

        def d(diffkwargs=diffkwargs):
            # buffer the output so terminal I/O does not skew the timing
            ui.pushbuffer()
            commands.diff(ui, repo, **diffkwargs)
            ui.popbuffer()

        label = diffopt.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(d, title=title)
    fm.end()
2080
2125
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rlog = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rlog, 'opener')  # trick linter
    indexfile = rlog.indexfile
    indexdata = opener.read(indexfile)

    header = struct.unpack(b'>I', indexdata[0:4])[0]
    version = header & 0xFFFF
    if version != 1:
        raise error.Abort((b'unsupported revlog version: %d') % version)
    revlogio = revlog.revlogio()
    inline = header & (1 << 16)

    numrevs = len(rlog)

    # sample nodes at several depths of the revlog for lookup benchmarks
    firstnode = rlog.node(0)
    quarternode = rlog.node(numrevs // 4)
    middlenode = rlog.node(numrevs // 2)
    threequarternode = rlog.node(numrevs // 4 * 3)
    tipnode = rlog.node(numrevs - 1)

    allrevs = range(numrevs)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rlog.node(rev) for rev in range(numrevs)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time instantiation only; no data is read here
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(indexdata, inline)

    def getentry(revornode):
        index = revlogio.parseindex(indexdata, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(indexdata, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(indexdata, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(indexdata, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(firstnode), b'look up node at rev 0'),
        (lambda: resolvenode(quarternode), b'look up node at 1/4 len'),
        (lambda: resolvenode(middlenode), b'look up node at 1/2 len'),
        (lambda: resolvenode(threequarternode), b'look up node at 3/4 len'),
        (lambda: resolvenode(tipnode), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2198
2243
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rlog = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    numrevs = getlen(ui)(rlog)

    if startrev < 0:
        # a negative start revision counts back from the tip
        startrev += numrevs

    def doread():
        rlog.clearcaches()

        first = startrev
        stop = numrevs
        step = opts[b'dist']

        if reverse:
            # walk from the tip down to (and including) the start revision
            first, stop = stop - 1, first - 1
            step = -step

        for pos in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            rlog.revision(rlog.node(pos))

    timer, fm = gettimer(ui, opts)
    timer(doread)
    fm.end()
2240
2285
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revision numbers count back from the tip
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: message previously contained the typo "invalide"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing from run 1, timing from run 2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: this entry previously used `resultcount * 70 // 100`,
        # which reported the 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2358
2403
2359 class _faketr(object):
2404 class _faketr(object):
2360 def add(s, x, y, z=None):
2405 def add(s, x, y, z=None):
2361 return None
2406 return None
2362
2407
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Replay revisions of ``orig`` into a truncated copy, timing each add.

    Returns a list of (rev, timing) pairs, one per rewritten revision.
    """
    timings = []
    faketr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            # bound methods double as the callbacks directly
            updateprogress = progress.update
            completeprogress = progress.complete
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, faketr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2399
2444
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair feeding ``addrawrevision`` for ``rev``.

    Depending on ``source``, the revision is supplied either as a full
    text or as a cached delta against one of its parents (or against its
    stored delta base).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(otherdiff) < len(diff):
                parent, diff = p2, otherdiff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2438
2483
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a scratch copy of ``orig`` truncated before ``truncaterev``.

    The copy lives in a throw-away temporary directory which is removed
    when the context exits, whatever happens inside it.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    if util.safehasattr(orig, 'upperboundcomp'):
        # forward the compression upper-bound knob when the source has one
        revlogkwargs['upperboundcomp'] = orig.upperboundcomp

    srcindexpath = orig.opener.join(orig.indexfile)
    srcdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, indexname)
        destdatapath = os.path.join(tmpdir, dataname)
        shutil.copyfile(srcindexpath, destindexpath)
        shutil.copyfile(srcdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as indexfh:
            indexfh.seek(0)
            indexfh.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as datafh:
            datafh.seek(0)
            datafh.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname, **revlogkwargs)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2489
2534
2490 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2535 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2491 [(b'e', b'engines', b'', b'compression engines to use'),
2536 [(b'e', b'engines', b'', b'compression engines to use'),
2492 (b's', b'startrev', 0, b'revision to start at')],
2537 (b's', b'startrev', 0, b'revision to start at')],
2493 b'-c|-m|FILE')
2538 b'-c|-m|FILE')
2494 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2539 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2495 """Benchmark operations on revlog chunks.
2540 """Benchmark operations on revlog chunks.
2496
2541
2497 Logically, each revlog is a collection of fulltext revisions. However,
2542 Logically, each revlog is a collection of fulltext revisions. However,
2498 stored within each revlog are "chunks" of possibly compressed data. This
2543 stored within each revlog are "chunks" of possibly compressed data. This
2499 data needs to be read and decompressed or compressed and written.
2544 data needs to be read and decompressed or compressed and written.
2500
2545
2501 This command measures the time it takes to read+decompress and recompress
2546 This command measures the time it takes to read+decompress and recompress
2502 chunks in a revlog. It effectively isolates I/O and compression performance.
2547 chunks in a revlog. It effectively isolates I/O and compression performance.
2503 For measurements of higher-level operations like resolving revisions,
2548 For measurements of higher-level operations like resolving revisions,
2504 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2549 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2505 """
2550 """
2506 opts = _byteskwargs(opts)
2551 opts = _byteskwargs(opts)
2507
2552
2508 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2553 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2509
2554
2510 # _chunkraw was renamed to _getsegmentforrevs.
2555 # _chunkraw was renamed to _getsegmentforrevs.
2511 try:
2556 try:
2512 segmentforrevs = rl._getsegmentforrevs
2557 segmentforrevs = rl._getsegmentforrevs
2513 except AttributeError:
2558 except AttributeError:
2514 segmentforrevs = rl._chunkraw
2559 segmentforrevs = rl._chunkraw
2515
2560
2516 # Verify engines argument.
2561 # Verify engines argument.
2517 if engines:
2562 if engines:
2518 engines = set(e.strip() for e in engines.split(b','))
2563 engines = set(e.strip() for e in engines.split(b','))
2519 for engine in engines:
2564 for engine in engines:
2520 try:
2565 try:
2521 util.compressionengines[engine]
2566 util.compressionengines[engine]
2522 except KeyError:
2567 except KeyError:
2523 raise error.Abort(b'unknown compression engine: %s' % engine)
2568 raise error.Abort(b'unknown compression engine: %s' % engine)
2524 else:
2569 else:
2525 engines = []
2570 engines = []
2526 for e in util.compengines:
2571 for e in util.compengines:
2527 engine = util.compengines[e]
2572 engine = util.compengines[e]
2528 try:
2573 try:
2529 if engine.available():
2574 if engine.available():
2530 engine.revlogcompressor().compress(b'dummy')
2575 engine.revlogcompressor().compress(b'dummy')
2531 engines.append(e)
2576 engines.append(e)
2532 except NotImplementedError:
2577 except NotImplementedError:
2533 pass
2578 pass
2534
2579
2535 revs = list(rl.revs(startrev, len(rl) - 1))
2580 revs = list(rl.revs(startrev, len(rl) - 1))
2536
2581
2537 def rlfh(rl):
2582 def rlfh(rl):
2538 if rl._inline:
2583 if rl._inline:
2539 return getsvfs(repo)(rl.indexfile)
2584 return getsvfs(repo)(rl.indexfile)
2540 else:
2585 else:
2541 return getsvfs(repo)(rl.datafile)
2586 return getsvfs(repo)(rl.datafile)
2542
2587
2543 def doread():
2588 def doread():
2544 rl.clearcaches()
2589 rl.clearcaches()
2545 for rev in revs:
2590 for rev in revs:
2546 segmentforrevs(rev, rev)
2591 segmentforrevs(rev, rev)
2547
2592
2548 def doreadcachedfh():
2593 def doreadcachedfh():
2549 rl.clearcaches()
2594 rl.clearcaches()
2550 fh = rlfh(rl)
2595 fh = rlfh(rl)
2551 for rev in revs:
2596 for rev in revs:
2552 segmentforrevs(rev, rev, df=fh)
2597 segmentforrevs(rev, rev, df=fh)
2553
2598
2554 def doreadbatch():
2599 def doreadbatch():
2555 rl.clearcaches()
2600 rl.clearcaches()
2556 segmentforrevs(revs[0], revs[-1])
2601 segmentforrevs(revs[0], revs[-1])
2557
2602
2558 def doreadbatchcachedfh():
2603 def doreadbatchcachedfh():
2559 rl.clearcaches()
2604 rl.clearcaches()
2560 fh = rlfh(rl)
2605 fh = rlfh(rl)
2561 segmentforrevs(revs[0], revs[-1], df=fh)
2606 segmentforrevs(revs[0], revs[-1], df=fh)
2562
2607
2563 def dochunk():
2608 def dochunk():
2564 rl.clearcaches()
2609 rl.clearcaches()
2565 fh = rlfh(rl)
2610 fh = rlfh(rl)
2566 for rev in revs:
2611 for rev in revs:
2567 rl._chunk(rev, df=fh)
2612 rl._chunk(rev, df=fh)
2568
2613
2569 chunks = [None]
2614 chunks = [None]
2570
2615
2571 def dochunkbatch():
2616 def dochunkbatch():
2572 rl.clearcaches()
2617 rl.clearcaches()
2573 fh = rlfh(rl)
2618 fh = rlfh(rl)
2574 # Save chunks as a side-effect.
2619 # Save chunks as a side-effect.
2575 chunks[0] = rl._chunks(revs, df=fh)
2620 chunks[0] = rl._chunks(revs, df=fh)
2576
2621
2577 def docompress(compressor):
2622 def docompress(compressor):
2578 rl.clearcaches()
2623 rl.clearcaches()
2579
2624
2580 try:
2625 try:
2581 # Swap in the requested compression engine.
2626 # Swap in the requested compression engine.
2582 oldcompressor = rl._compressor
2627 oldcompressor = rl._compressor
2583 rl._compressor = compressor
2628 rl._compressor = compressor
2584 for chunk in chunks[0]:
2629 for chunk in chunks[0]:
2585 rl.compress(chunk)
2630 rl.compress(chunk)
2586 finally:
2631 finally:
2587 rl._compressor = oldcompressor
2632 rl._compressor = oldcompressor
2588
2633
2589 benches = [
2634 benches = [
2590 (lambda: doread(), b'read'),
2635 (lambda: doread(), b'read'),
2591 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2636 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2592 (lambda: doreadbatch(), b'read batch'),
2637 (lambda: doreadbatch(), b'read batch'),
2593 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2638 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2594 (lambda: dochunk(), b'chunk'),
2639 (lambda: dochunk(), b'chunk'),
2595 (lambda: dochunkbatch(), b'chunk batch'),
2640 (lambda: dochunkbatch(), b'chunk batch'),
2596 ]
2641 ]
2597
2642
2598 for engine in sorted(engines):
2643 for engine in sorted(engines):
2599 compressor = util.compengines[engine].revlogcompressor()
2644 compressor = util.compengines[engine].revlogcompressor()
2600 benches.append((functools.partial(docompress, compressor),
2645 benches.append((functools.partial(docompress, compressor),
2601 b'compress w/ %s' % engine))
2646 b'compress w/ %s' % engine))
2602
2647
2603 for fn, title in benches:
2648 for fn, title in benches:
2604 timer, fm = gettimer(ui, opts)
2649 timer, fm = gettimer(ui, opts)
2605 timer(fn, title=title)
2650 timer(fn, title=title)
2606 fm.end()
2651 fm.end()
2607
2652
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m there is no FILE argument: the positional value is the rev
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Extract each revision's compressed chunk out of the raw segments
        # read from disk; 'data' holds one segment per slice of 'chain'.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        # phase 1: delta chain computation only
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # phase 3 I/O; NOTE: this iterates the precomputed 'slicedchain'
        # from the enclosing scope, not the 'chain' argument, so sparse-read
        # slicing is reflected in the timing.
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # phase 2: chain slicing (only meaningful with sparse-read)
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        # phase 4: decompression of already-extracted chunks
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        # phase 5: binary patching of the base text with the deltas
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        # phase 6: fulltext hash verification
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        # all phases combined: full revision retrieval
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs shared by the individual phase benchmarks so
    # each timed closure measures only its own phase
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2743
2788
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on the revset execution. Volatile caches hold
    filtering and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            # drop the volatile caches so they are recomputed on each run
            repo.invalidatevolatilesets()
        # fully consume the iterator; --contexts additionally constructs a
        # changectx object for every returned revision
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()
2766
2811
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def resetstate():
        # make every run start from a cold volatile-set state
        repo.invalidatevolatilesets()
        if opts[b'clear_obsstore']:
            clearfilecache(repo, b'obsstore')

    def makeobsbench(setname):
        # build a closure timing the computation of one obsolescence set
        def bench():
            resetstate()
            obsolete.getrevs(repo, setname)
        return bench

    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]
    for setname in obsnames:
        timer(makeobsbench(setname), title=setname)

    def makefilterbench(filtername):
        # build a closure timing the computation of one repoview filter
        def bench():
            resetstate()
            repoview.filterrevs(repo, filtername)
        return bench

    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]
    for filtername in filternames:
        timer(makefilterbench(filtername), title=filtername)
    fm.end()
2808
2853
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: time a full rebuild
                view._branchcaches.clear()
            else:
                # drop only this filter level: time the incremental update
                filtered.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself still pending, so that
        # 'allfilters' ends up ordered from smaller to bigger subsets; the
        # for/else fires only if no such filter exists (a subset cycle)
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable the on-disk branchmap read and write entry points so only the
    # in-memory computation is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # always restore the patched read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2887
2932
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the timed update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        # repoview filter hiding everything outside the base set
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        # repoview filter hiding everything outside the target set
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register two temporary repoview filters exposing exactly the base
        # and target revision sets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # work on a fresh copy so every run updates from the same state
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # drop the temporary filters even on failure
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2991
3036
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # only enumerate the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # fall back to the nearest subset filter that has a cached file
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        fromfile(repo)
    timer(bench, setup=setup)
    fm.end()
3042
3087
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def countmarkers():
        # instantiating the obsstore parses every on-disk marker; the
        # returned length doubles as the benchmark's reported result
        return len(obsolete.obsstore(svfs))

    timer(countmarkers)
    fm.end()
3052
3097
@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    # Construction cost: build and discard many caches.
    def doinit():
        for _ in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # Keys (and values) used to populate the cache before lookups.
    values = [random.randint(0, _maxint) for _ in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = [random.choice(values) for _ in _xrange(gets)]

    def dogets():
        cache = util.lrucachedict(size)
        for v in values:
            cache[v] = v
        for key in getseq:
            value = cache[key]
            value  # silence pyflakes warning

    def dogetscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            cache.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = cache[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.  The key and its
    # cost are drawn in the same interleaved order as always so the random
    # sequences are reproducible.
    setseq = []
    costs = []
    for _ in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache.insert(v, v)

    def doinsertscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            cache.insert(v, v, cost=costs[i])

    def dosets():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for _ in _xrange(mixed):
        if random.randint(0, 100) < mixedgetfreq:
            op = 0  # lookup
        else:
            op = 1  # insertion
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        cache = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache[v] = v

    def domixedcost():
        cache = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache.insert(v, v, cost=cost)

    # Cost-limited variants only make sense when a limit was requested.
    if costlimit:
        benches = [
            (doinit, b'init'),
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ]
    else:
        benches = [
            (doinit, b'init'),
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed'),
        ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3183
3228
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def write():
        # Use _xrange rather than the builtin range: on Python 2 the
        # builtin materializes a 100000-element list up front, which the
        # rest of this file deliberately avoids via _xrange.
        for i in _xrange(100000):
            ui.write(b'Testing write performance\n')
    timer(write)
    fm.end()
3196
3241
def uisetup(ui):
    """Extension setup hook: patch very old Mercurials at load time."""
    # for "historical portability": only act when cmdutil.openrevlog exists
    # but commands.debugrevlogopts does not, i.e. Mercurial 1.9
    # (or a79fea6b3e77) - 3.7 (or 5606f7d0d063).
    if not util.safehasattr(cmdutil, b'openrevlog'):
        return
    if util.safehasattr(commands, b'debugrevlogopts'):
        return

    # On those versions, '--dir' passed to openrevlog() should cause
    # failure, because that option has only been available since 3.5
    # (or 49c583ca48c4).
    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)
    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3211
3256
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    # Drive one full progress bar from zero to --total, one increment
    # per step, so the per-update overhead dominates the measurement.
    def runprogress():
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(runprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now