##// END OF EJS Templates
# perf: fix perfhelper-pathcopies without --stats
# marmoute - r43273:bbf77341 default
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 dir(registrar) # forcibly load it
96 dir(registrar) # forcibly load it
97 except ImportError:
97 except ImportError:
98 registrar = None
98 registrar = None
99 try:
99 try:
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 except ImportError:
101 except ImportError:
102 pass
102 pass
103 try:
103 try:
104 from mercurial.utils import repoviewutil # since 5.0
104 from mercurial.utils import repoviewutil # since 5.0
105 except ImportError:
105 except ImportError:
106 repoviewutil = None
106 repoviewutil = None
107 try:
107 try:
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 except ImportError:
109 except ImportError:
110 pass
110 pass
111 try:
111 try:
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115
115
116 try:
116 try:
117 from mercurial import profiling
117 from mercurial import profiling
118 except ImportError:
118 except ImportError:
119 profiling = None
119 profiling = None
120
120
def identity(a):
    """Return *a* unchanged; no-op fallback for missing pycompat helpers."""
    return a
123
123
# for "historical portability":
# bind pycompat helpers to local names, falling back to py2-only
# equivalents when running against a Mercurial too old to provide them
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # pre-pycompat Mercurial is py2-only, so py2 builtins are safe here
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
145
145
# for "historical portability":
# locate a usable Queue class across Mercurial/Python versions
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        # py2 stdlib fallback (the module is named "queue" only on py3)
        import Queue as queue
155
155
# for "historical portability":
# maketemplater moved from cmdutil to logcmdutil; leave None when
# running against a Mercurial that provides neither
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
164
164
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel: distinguishes "attr missing" from attr == None
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
172
172
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so this bytes comparison only
    # matches on py2 (where bytes == str); on py3 time.perf_counter exists
    # and the first branch is always taken -- confirm before relying on it
    util.timer = time.clock
else:
    util.timer = time.time
182
182
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))
190
190
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
]))
202
202
cmdtable = {}

# for "historical portability":
# cmdutil.parsealiases has only been available since 1.5 (or
# 6252852b4332), so provide a local equivalent
def parsealiases(cmd):
    """Split a command spec b"name|alias|..." into its list of names."""
    return cmd.split(b"|")
210
210
# pick the most capable @command decorator this Mercurial provides,
# wrapping or re-implementing it for versions lacking "norepo" support
if safehasattr(registrar, 'command'):
    # modern path: registrar.command (3.7+)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
238
238
# for "historical portability": declare the perf.* config items when the
# registrar supports it; silently skip on Mercurial versions without it
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(b'perf', b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # older configitem() rejects the experimental= keyword, so
    # re-register everything without it
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
296
296
def getlen(ui):
    """Return the length function benchmarks should use.

    In stub mode (perf.stub) every input pretends to have length 1 so
    test runs finish quickly; otherwise the builtin ``len`` is returned.
    """
    stub = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stub else len
301
301
class noop(object):
    """Context manager that does nothing; stands in for a disabled profiler."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None

# shared do-nothing context, reused wherever no profiling is wanted
NOOPCTX = noop()
310
310
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # parse "<seconds>-<runcount>" entries, warning on (and skipping)
    # malformed ones
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n'
                     % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn((b'malformatted run limit entry, %s: %s\n'
                     % (_bytestr(e), item)))
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
                          prerun=prerun, profiler=profiler)
    return t, fm
410
410
def stub_timer(fm, func, setup=None, title=None):
    """Single-shot replacement for _timer used when perf.stub is set.

    Runs the optional *setup* callable and then *func* exactly once,
    producing no timing output; *fm* and *title* are accepted only for
    signature compatibility with _timer.
    """
    if setup is not None:
        setup()
    func()
415
415
@contextlib.contextmanager
def timeone():
    """Time one run of the with-block body.

    Yields a list that receives a single (wallclock, user-cpu, system-cpu)
    triple once the with-block exits.
    """
    measurement = []
    times_before = os.times()
    clock_before = util.timer()
    yield measurement
    clock_after = util.timer()
    times_after = os.times()
    wall = clock_after - clock_before
    user_cpu = times_after[0] - times_before[0]
    system_cpu = times_after[1] - times_before[1]
    measurement.append((wall, user_cpu, system_cpu))
426
426
427
427
# list of stop condition (elapsed time, minimal run count)
# evaluated in order by _timer: stop once elapsed >= time AND count >= runs
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
433
433
def _timer(fm, func, setup=None, title=None, displayall=False,
           limits=DEFAULTLIMITS, prerun=0, profiler=None):
    """Benchmark *func* and report the collected timings through *fm*.

    *setup*, when given, runs before every iteration (untimed).  *prerun*
    extra iterations are executed first without being measured.  Only the
    first measured iteration runs under *profiler* (if any).  Iteration
    stops at the first (time, count) pair in *limits* that is satisfied.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up runs, deliberately not measured
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
466
466
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter *fm*.

    *timings* is a list of (wall, user-cpu, system-cpu) triples.  Always
    reports the best run; with *displayall* also reports max, average and
    median.  *title* and *result* are echoed first when provided.

    Fixes vs. previous version: sorts a copy instead of mutating the
    caller's list in place, and returns early on an empty *timings*
    instead of raising IndexError.
    """
    # nothing to report; avoid IndexError / division by zero below
    if not timings:
        return

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # fields for non-"best" roles are namespaced, e.g. "max.wall"
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    # sort a copy so the caller's list is not reordered as a side effect
    timings = sorted(timings)
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        # element-wise average of all runs
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        display(b'median', timings[len(timings) // 2])
498
498
499 # utilities for historical portability
499 # utilities for historical portability
500
500
def getint(ui, section, name, default):
    """Read config value ``section.name`` as an int, or *default* if unset.

    for "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so parse the raw value ourselves.

    Raises error.ConfigError when the value is set but not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            (b"%s.%s is not an integer ('%s')") % (section, name, raw))
512
512
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    # capture the current value so restore() can undo any set()
    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # set() overwrites the attribute; restore() puts back origvalue
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
542
542
543 # utilities to examine each internal API changes
543 # utilities to examine each internal API changes
544
544
def getbranchmapsubsettable():
    """Locate the branch-head 'subsettable' mapping across hg versions.

    For "historical portability" the table has lived in several modules:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
561
561
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # fall back to the legacy 'sopener' name on older repos.
    store_vfs = getattr(repo, 'svfs', None)
    return store_vfs or getattr(repo, 'sopener')
572
572
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # fall back to the legacy 'opener' name on older repos.
    working_vfs = getattr(repo, 'vfs', None)
    return working_vfs or getattr(repo, 'opener')
583
583
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    The probes below go from newest to oldest tags-cache API; the first
    matching one wins.
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
612
612
613 # utilities to clear cache
613 # utilities to clear cache
614
614
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property so the next access recomputes it."""
    # filecache entries live on the unfiltered repo when there is one
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # forget the stat-based cache entry as well (tolerate absence)
    obj._filecache.pop(attrname, None)
622
622
def clearchangelog(repo):
    """Invalidate cached changelog state on ``repo``."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # filtered repos keep their own changelog cache slots; null them
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
628
628
629 # perf commands
629 # perf commands
630
630
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # Time a full dirstate walk of the working directory.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def d():
        walk = repo.dirstate.walk(matcher, subrepos=[], unknown=True,
                                  ignored=False)
        return len(list(walk))

    timer(d)
    fm.end()
639
639
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # Time a full annotate of file `f` at the working-directory parent.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
647
647
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    # Time `repo.status()`, optionally including the unknown-file scan.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    askunknown = opts[b'unknown']

    def d():
        # sum of all status-category lengths forces full computation
        return sum(map(len, repo.status(unknown=askunknown)))

    timer(d)
    fm.end()
659
659
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # Time a dry-run addremove scan of the working directory.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Save the quiet flag *before* entering the try block: previously the
    # assignment lived inside the try, so a failure before it completed
    # would make the finally clause raise NameError and mask the real error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True  # silence per-file output during the benchmark
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True  # never actually add/remove files
        # scmutil.addremove grew a `uipathfn` argument at some point; probe
        # the signature for "historical portability".
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
677
677
def clearcaches(cl):
    """Drop revlog lookup caches, tolerating old and new revlog APIs."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs expose an explicit cache-clearing entry point
        cl.clearcaches()
        return
    if util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
686
686
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start each run from cold revlog caches
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
699
699
@command(b'perftags', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    # Time computing the repository tags, from a cold tags cache each run.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            # also start from cold changelog/manifest caches
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        cleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
718
718
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # Time iterating over every ancestor of all changelog heads.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
729
729
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # Time membership tests of `revset` revisions against a lazy
    # ancestor set built from all heads.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        ancestorset = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestorset

    timer(d)
    fm.end()
742
742
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    # Convert native-str kwargs to bytes keys like every other perf command:
    # without this, gettimer() (which reads bytes keys such as b'template')
    # silently ignores all formatter options under Python 3.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # re-open the peer each run so connection state does not carry over
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
757
757
@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            # optionally start from a cold changelog too
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def run():
        # attribute access triggers the (re)parse being measured
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
776
776
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark factories: each returns a zero-argument callable that
    # re-opens the bundle file so every timed run starts from scratch

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # drain the decoded bundle stream in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file read baseline, no bundle decoding at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # drain each bundle2 part in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines always run, whatever the bundle format
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle format once to decide which benchmarks apply
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
894
894
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # exhaust the chunk generator so all changelog work actually runs
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
925
925
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # Time dirstate.hasdir() with the directory map rebuilt each run.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b'a' in ds  # load the dirstate once, outside the timed loop

    def d():
        ds.hasdir(b'a')
        # drop the cached dir structure so the next run rebuilds it
        del ds._map._dirs

    timer(d)
    fm.end()
937
937
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    # Time loading the dirstate from disk (invalidate + first lookup).
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm everything up once before timing

    def reload():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate

    timer(reload)
    fm.end()
948
948
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    # Time dirstate.hasdir() against a freshly discarded directory map.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # make sure the dirstate itself is loaded

    def d():
        repo.dirstate.hasdir(b"a")
        # force the dir map to be recomputed on the next run
        del repo.dirstate._map._dirs

    timer(d)
    fm.end()
959
959
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    # Time building the case-folding map for dirstate files.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b'a' in ds  # load the dirstate once, outside the timed loop

    def d():
        ds._map.filefoldmap.get(b'a')
        # drop the map so the next run rebuilds it from scratch
        del ds._map.filefoldmap

    timer(d)
    fm.end()
971
971
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    # Time building the case-folding map for dirstate directories.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b'a' in ds  # load the dirstate once, outside the timed loop

    def d():
        ds._map.dirfoldmap.get(b'a')
        # drop both the fold map and the dir map so each run recomputes them
        del ds._map.dirfoldmap
        del ds._map._dirs

    timer(d)
    fm.end()
984
984
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    # Time serializing the dirstate back to disk.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # load the dirstate before timing

    def write():
        ds._dirty = True  # mark dirty so write() actually serializes
        ds.write(repo.currenttransaction())

    timer(write)
    fm.end()
996
996
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1018
1018
@command(b'perfmergecalculate',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecalculate(ui, repo, **opts):
    # Time merge.calculateupdates() between the revisions given in opts.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor],
                               branchmerge=False, force=False,
                               acceptremote=True, followcopies=True)

    timer(d)
    fm.end()
1038
1038
@command(b'perfmergecopies',
         [
             (b'r', b'rev', b'.', b'rev to merge against'),
             (b'', b'from', b'', b'rev to merge from'),
             (b'', b'base', b'', b'the revision to use as base'),
         ] + formatteropts)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # trace copies between the two merge sides via their ancestor
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1056
1056
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def runcase():
        # trace renames between the two resolved changesets
        copies.pathcopies(ctx1, ctx2)

    timer(runcase)
    fm.end()
1068
1068
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def runcase():
        phases = _phases
        if full:
            # also account for rebuilding the cache object itself, which
            # includes reading the phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(runcase)
    fm.end()
1087
1087
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # deferred imports: these modules are only needed by this command
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # resolve DEST the same way `hg push` would (default-push, then default)
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once; only the local summary computation
    # below is what gets benchmarked
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    # count remote phase roots that exist locally and are non-public
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        # the measured operation: summarizing remote phases against the
        # discovered head subset
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
1143
1143
@command(b'perfmanifest',[
         (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
         (b'', b'clear-disk', False, b'clear on-disk caches too'),
        ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; derive the manifest node from it
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-hex-digit manifest node given directly
            t = bin(rev)
        else:
            try:
                # otherwise REV must be an integer manifest revision
                rev = int(rev)

                # getstorage() only exists on newer Mercurial versions;
                # fall back to the older private _revlog attribute
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        # clear in-memory (and optionally on-disk) caches so every run
        # measures a cold read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
1179
1179
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def runcase():
        repo.changelog.read(node)

    timer(runcase)
    fm.end()
1190
1190
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def preparerun():
        # drop the dirstate content and the cached ignore matcher so the
        # matcher is rebuilt from scratch on every run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runcase():
        # accessing the property triggers the (benchmarked) ignore build
        dirstate._ignore

    timer(runcase, setup=preparerun, title=b"load")
    fm.end()
1207
1207
@command(b'perfindex', [
         (b'', b'rev', [], b'revision to be looked up (default tip)'),
         (b'', b'no-lookup', None, b'do not revision lookup post creation'),
        ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: after _byteskwargs all option keys are bytes; the previous
        # str key ('rev') and str Abort message broke under Python 3
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        # benchmarked section: building a fresh changelog (index parsing)
        # followed by the requested node lookups
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()
1261
1261
@command(b'perfnodemap', [
         (b'', b'rev', [], b'revision to be looked up (default tip)'),
         (b'', b'clear-caches', True, b'clear revlog cache between calls'),
        ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # fix: after _byteskwargs all option keys are bytes; the previous str
    # key ('clear_caches') raised KeyError under Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1320
1320
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark spawning `hg version` in a subprocess"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runcase():
        if os.name == r'nt':
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))

    timer(runcase)
    fm.end()
1334
1334
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def runcase():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(runcase)
    fm.end()
1358
1358
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of one changeset context"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def runcase():
        len(repo[rev].files())

    timer(runcase)
    fm.end()
1368
1368
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def runcase():
        # index 3 of a parsed changelog entry is the list of touched files
        len(cl.read(rev)[3])

    timer(runcase)
    fm.end()
1379
1379
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runcase():
        return len(repo.lookup(rev))

    timer(runcase)
    fm.end()
1386
1386
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every invocation benchmarks the same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # replace hunk [a1, a2) of the current content with new lines
        # [b1, b2); track the resulting file length
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        # benchmarked section: replaying the whole pre-computed edit list
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1420
1420
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def runcase():
        return len(scmutil.revrange(repo, specs))

    timer(runcase)
    fm.end()
1428
1428
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node lookup in a freshly-built changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def runcase():
        cl.rev(node)
        # reset revlog caches so the next run starts cold again
        clearcaches(cl)

    timer(runcase)
    fm.end()
1442
1442
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a plain `hg log` invocation"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # capture the command output so terminal I/O does not skew the timing
    ui.pushbuffer()
    followrenames = opts.get(b'rename')

    def runcase():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=followrenames)

    timer(runcase)
    ui.popbuffer()
    fm.end()
1456
1456
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkbackwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(walkbackwards)
    fm.end()
1471
1471
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into a ui whose output goes to os.devnull; we only care about
    # the rendering time, not the output itself
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    try:
        timer(format)
        fm.end()
    finally:
        # fix: the devnull file object was previously leaked
        nullui.fout.close()
1505
1505
def _displaystats(ui, opts, entries, data):
    """display percentile statistics for collected measurements

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each
    ``key`` to a list of ``(value, context)`` tuples to summarize.
    """
    # (removed a stray `pass` statement left over at the top of the body)
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # nothing measured for this key; indexing percentiles into an
            # empty list would raise IndexError
            continue
        # fix: percentile indices must be computed against the number of
        # values; the previous `len(data)` used the number of keys, which
        # made every percentile wrong (or raised IndexError)
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1551
1551
@command(b'perfhelper-mergecopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          (b'', b'stats', False, b'provides statistic about the measured data'),
         ])
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, %-format for the matching key in `data`)
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # timing and rename columns only exist when --timing is set
        output_template = [i for i in output_template
                           if not ('time' in i[0] or 'renames' in i[0])]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant to merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append((
                        data['p1.nbrevs'],
                        b.hex(),
                        p1.hex()
                    ))
                    alldata['nbmissingfiles'].append((
                        data['p1.nbmissingfiles'],
                        b.hex(),
                        p1.hex()
                    ))
                if p2missing:
                    alldata['nbrevs'].append((
                        data['p2.nbrevs'],
                        b.hex(),
                        p2.hex()
                    ))
                    alldata['nbmissingfiles'].append((
                        data['p2.nbmissingfiles'],
                        b.hex(),
                        p2.hex()
                    ))
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # fix: read the clock *before* computing the delta; the
                # previous code subtracted using the stale `end` from the
                # p1 measurement, making p2.time meaningless.
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append((
                            data['p1.renamedfiles'],
                            b.hex(),
                            p1.hex()
                        ))
                        alldata['parenttime'].append((
                            data['p1.time'],
                            b.hex(),
                            p1.hex()
                        ))
                    if p2missing:
                        alldata['parentnbrenames'].append((
                            data['p2.renamedfiles'],
                            b.hex(),
                            p2.hex()
                        ))
                        alldata['parenttime'].append((
                            data['p2.time'],
                            b.hex(),
                            p2.hex()
                        ))
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append((
                            data['nbrenamedfiles'],
                            b.hex(),
                            p1.hex(),
                            p2.hex()
                        ))
                        alldata['totaltime'].append((
                            data['time'],
                            b.hex(),
                            p1.hex(),
                            p2.hex()
                        ))
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('parentnbrenames',
                            'rename from one parent to base'))
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
1743
1743
1744
1744
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          (b'', b'stats', False, b'provides statistic about the measured data'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the output gains rename-count and timing columns with --timing
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)


    # `alldata` only exists when --stats is requested; every append below is
    # therefore guarded by `if dostats:`
    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits yield (base, parent) pairs worth measuring
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no files to trace, nothing to measure
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append((
                        data['nbrevs'],
                        base.hex(),
                        parent.hex(),
                    ))
                    alldata['nbmissingfiles'].append((
                        data['nbmissingfiles'],
                        base.hex(),
                        parent.hex(),
                    ))
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append((
                            data['time'],
                            base.hex(),
                            parent.hex(),
                        ))
                        alldata['nbrenames'].append((
                            data['nbrenamedfiles'],
                            base.hex(),
                            parent.hex(),
                        ))
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames',
                            'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
1865
1867
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark construction of a case collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def setup_auditor():
        # build a fresh auditor each run; cost of construction is measured
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(setup_auditor)
    fm.end()
1872
1874
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # each run re-reads the fncache file
    timer(lambda: store.fncache._load())
    fm.end()
1882
1884
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache

    Opens a transaction (backing up the current fncache) and repeatedly
    marks the fncache dirty and rewrites it.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # fix: release the lock (and let the transaction unwind) even if the
    # benchmark raises; previously an exception in timer() leaked the repo
    # lock for the life of the process.
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')
        def d():
            # force a rewrite on every run
            s.fncache._dirty = True
            s.fncache.write(tr)
        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
1899
1901
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark path encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # load once up front so only encoding is measured
    s.fncache._load()

    def encode_all():
        encode = s.encode
        for entry in s.fncache.entries:
            encode(entry)

    timer(encode_all)
    fm.end()
1911
1913
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker thread body for the threaded variant of `perfbdiff`.

    Pulls text pairs off queue ``q`` and diffs them; a ``None`` item marks
    the end of one benchmark run. ``blocks``/``xdiff`` select the diff
    algorithm; ``ready`` is a Condition used to park workers between runs
    and ``done`` is an Event signalling final shutdown.
    """
    while not done.is_set():
        pair = q.get()
        # inner loop: process one benchmark run until the None sentinel
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        # wait until the driver signals the next run (or shutdown)
        with ready:
            ready.wait()
1927
1929
def _manifestrevision(repo, mnode):
    """Return the raw manifest revision text for node ``mnode``.

    Handles both modern manifestlogs (``getstorage``) and older ones that
    only expose the private ``_revlog`` attribute.
    """
    manifestlog = repo.manifestlog
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog
    return storage.revision(mnode)
1937
1939
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies reading the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # first positional argument is actually the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        # single-threaded: diff every pair inline
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # threaded: start the workers now, outside the timed section; they
        # immediately drain one None sentinel each and park on `ready`
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            # one timed run: feed all pairs plus one sentinel per worker,
            # wake the workers, then wait for the queue to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set the done flag, unblock their q.get(),
        # and wake any worker parked on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2038
2040
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies reading the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # first positional argument is actually the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect all (left, right) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2104
2106
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map of single-letter diff flags to the diff option they enable
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # benchmark `hg diff` with each whitespace-option combination
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffargs = dict((flagnames[flag], b'1') for flag in flags)

        def run():
            # buffer the output so we time the diff, not the terminal
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        label = flags.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(run, title=title)
    fm.end()
2126
2128
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    # raw index bytes; every closure below re-parses this same buffer
    data = opener.read(indexfile)

    # first 4 bytes of a revlog index encode flags (high 16 bits) and
    # version (low 16 bits)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed points of the revlog for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time instantiating a revlog object from scratch
        revlog.revlog(opener, indexfile)

    def read():
        # time raw I/O on the index file, no parsing
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # time parsing the already-read index bytes
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # parseindex returns (index, nodemap); we only need the nodemap
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            # missing nodes are expected for the "missing node" benchmark
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, title) pairs; each is timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2244
2246
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # negative startrev counts back from the end, like Python indexing
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # start each run cold so I/O and delta resolution are measured
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # walk from tip down to startrev with a negative stride
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2286
2288
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
          ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end, like Python indexing
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed typo: was "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        # one full pass re-adding [startrev, stoprev] into a throwaway revlog
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase,
                               clearcaches=clearcaches)
        allresults.append(timing)

    ### consolidate the results in a single list
    # turn the per-pass [(rev, time), ...] lists into [(rev, [times...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            # every pass must have visited the same revisions in order
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: the 50th percentile previously used `* 70 // 100`,
        # reporting the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
2404
2406
2405 class _faketr(object):
2407 class _faketr(object):
2406 def add(s, x, y, z=None):
2408 def add(s, x, y, z=None):
2407 return None
2409 return None
2408
2410
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True, clearcaches=True):
    """Perform one pass of re-adding revisions and time each addition.

    Copies ``orig`` into a temporary revlog truncated at ``startrev``, then
    re-adds revisions ``startrev``..``stoprev`` one by one, timing each
    ``addrawrevision`` call.  Returns a list of ``(rev, timing)`` pairs,
    where ``timing`` is the value produced by the ``timeone`` context
    manager.  ``runidx``, if given, is only used to label the progress bar.
    """
    timings = []
    # journalling is irrelevant to the benchmark; use a no-op transaction
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            def updateprogress(pos):
                progress.update(pos)
            def completeprogress():
                progress.complete()
        else:
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)
            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # seed data (full text or cached delta) comes from the original
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop caches outside the timed section so only the
                # addrawrevision call itself is measured cold
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2445
2447
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair fed to ``addrawrevision`` for ``rev``.

    ``source`` selects what seed data accompanies the revision: the full
    text (``full``), a cached delta against one of the parents
    (``parent-1``, ``parent-2``, ``parent-smallest``), or the delta already
    stored in ``orig`` (``storage``).
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    parent1, parent2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)

    text = None
    cachedelta = None

    if source == b'full':
        # seed from the reconstructed full text, no precomputed delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(parent1), orig.revdiff(parent1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = parent1 if parent2 == nullid else parent2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        base = parent1
        diff = orig.revdiff(parent1, rev)
        if parent2 != nullid:
            otherdiff = orig.revdiff(parent2, rev)
            # strictly smaller only: on a tie, keep the first-parent delta
            if len(otherdiff) < len(diff):
                base = parent2
                diff = otherdiff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        # reuse the delta exactly as stored in the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, parent1, parent2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2484
2486
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary copy of revlog ``orig`` truncated at ``truncaterev``.

    The copy lives in a throwaway directory that is removed on exit, so the
    caller can freely append revisions to it without touching the original
    repository data.  Inline revlogs are rejected because index and data
    would share one file, which the copy/truncate logic below cannot handle.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward upperboundcomp when the revlog version supports it
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size, so the cut point is a multiple
            # of the entry size
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            # drop everything from the first truncated revision onward
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname, **revlogkwargs)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # best-effort cleanup: ignore errors removing the temp directory
        shutil.rmtree(tmpdir, True)
2535
2537
2536 @command(b'perfrevlogchunks', revlogopts + formatteropts +
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Either validate the engines requested on the command line, or default
    # to every available engine that can actually compress.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            if engine not in util.compressionengines:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    # probe: some engines exist but cannot compress
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Open a raw file handle on whichever file holds the revlog data.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect; the compression benchmarks reuse them.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    # The zero-argument closures can be passed to timer() directly; no
    # lambda wrappers are needed.
    benches = [
        (doread, b'read'),
        (doreadcachedfh, b'read w/ reused fd'),
        (doreadbatch, b'read batch'),
        (doreadbatchcachedfh, b'read batch w/ reused fd'),
        (dochunk, b'chunk'),
        (dochunkbatch, b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2653
2655
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m, the lone positional argument is actually the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice each read segment back into per-revision compressed chunks,
        # using zero-copy buffers.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        addchunk = chunks.append
        for segidx, segment in enumerate(chain):
            offset = start(segment[0])
            segdata = data[segidx]
            for rev in segment:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                addchunk(buffer(segdata, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE: iterates ``slicedchain`` from the enclosing scope, not the
        # ``chain`` argument (matches historical behavior).
        if not cache:
            r.clearcaches()
        for segment in slicedchain:
            segmentforrevs(segment[0], segment[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute every intermediate result once, so each benchmark measures
    # only its own phase.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        benches.append((lambda: doslice(r, chain, size),
                        b'slice-sparse-chain'))

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2789
2791
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    volatile revisions set cache on the revset execution. Volatile cache hold
    filtered and obsolete related cache."""
    # NOTE: the docstring previously said ``--clean``, but the declared
    # option is ``-C/--clear`` — fixed to match the actual flag.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop the volatile (filtering/obsolescence) caches so each run
            # pays the rebuild cost
            repo.invalidatevolatilesets()
        if contexts:
            # materializing changectx objects is costlier than bare revs
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
2812
2814
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def mkbench(compute, name):
        # Build a benchmark callable that recomputes *name* from a cold
        # cache; ``compute`` is either obsolete.getrevs or
        # repoview.filterrevs, both of which take (repo, name).
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, name)
        return d

    # obsolescence-related sets
    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(mkbench(obsolete.getrevs, name), title=name)

    # repoview filter sets
    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(mkbench(repoview.filterrevs, name), title=name)
    fm.end()
2854
2856
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        view = repo if filtername is None else repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()
        return d

    # Order the filters from smaller subset to bigger subset so each
    # benchmark can build on the previous one's warmed cache.
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap reads and writes so only the in-memory
    # computation is measured.
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = b'unfiltered' if name is None else name
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2933
2935
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # Register two temporary repoview filters exposing exactly the base
        # and target revision subsets; removed again in the finally clause.
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # fresh copy of the base branchmap for every run
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3037
3039
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    # ``filter`` and ``list`` shadow builtins, but their names are bound to
    # the command-line options and cannot be renamed without breaking the CLI.
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: enumerate the on-disk branchmap cache files and exit
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3088
3090
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    # Timing wraps the full obsstore parse; len() forces the lazy load.
    timer, fm = gettimer(ui)
    store_vfs = getsvfs(repo)

    def count_markers():
        return len(obsolete.obsstore(store_vfs))

    timer(count_markers)
    fm.end()
3098
3100
@command(b'perflrucachedict', formatteropts +
         [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
          (b'', b'mincost', 0, b'smallest cost of items in cache'),
          (b'', b'maxcost', 100, b'maximum cost of items in cache'),
          (b'', b'size', 4, b'size of cache'),
          (b'', b'gets', 10000, b'number of key lookups'),
          (b'', b'sets', 10000, b'number of key sets'),
          (b'', b'mixed', 10000, b'number of mixed mode operations'),
          (b'', b'mixedgetfreq', 50,
           b'frequency of get vs set ops in mixed mode')],
         norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    # Benchmark util.lrucachedict: construction, gets, inserts, sets and a
    # randomized mix, with and without a total-cost limit on the cache.
    opts = _byteskwargs(opts)

    def doinit():
        # Raw construction cost, amortized over many instances.
        for _ in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # Random keys used to populate the cache for the "get" benchmarks.
    values = [random.randint(0, _maxint) for _ in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = [random.choice(values) for _ in _xrange(gets)]

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            # Cost-limited caches may have evicted the key already.
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for _ in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for _ in _xrange(mixed):
        r = random.randint(0, 100)
        # op 0 == get, op 1 == set; ratio controlled by --mixedgetfreq.
        op = 0 if r < mixedgetfreq else 1
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)
        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # The cost-aware and plain benchmark variants are mutually exclusive.
    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
    fm.end()
3229
3231
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def write():
        # 100k small writes exercise the per-call ui.write overhead.
        for _ in range(100000):
            ui.write(b'Testing write performance\n')

    timer(write)
    fm.end()
3242
3244
def uisetup(ui):
    # Extension setup hook.  On very old Mercurials, make a missing --dir
    # support fail loudly instead of being silently ignored.
    if not util.safehasattr(cmdutil, b'openrevlog'):
        return
    if util.safehasattr(commands, b'debugrevlogopts'):
        return
    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)
    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3257
3259
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # Drive one full progress bar; the context manager handles cleanup.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now