##// END OF EJS Templates
perf: use `setup` function in `perfdirstatewrite`...
marmoute -
r43400:c88075eb default
parent child Browse files
Show More
@@ -1,3776 +1,3778 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return *a* unchanged (fallback when pycompat helpers are absent)."""
    return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
def safehasattr(thing, attr):
    """Return True when *thing* has an attribute named *attr* (bytes name).

    The bytes name is converted with _sysstr before lookup; a sentinel
    default distinguishes a genuinely missing attribute from any value.
    """
    found = getattr(thing, _sysstr(attr), _undefined)
    return found is not _undefined
179
179
180
180
181 setattr(util, 'safehasattr', safehasattr)
181 setattr(util, 'safehasattr', safehasattr)
182
182
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a native str, so this bytes comparison is
    # always False on Python 3 — the time.clock branch is only reachable
    # on Python 2 Windows.  Confirm this is the intended behavior.
    util.timer = time.clock
else:
    util.timer = time.time
192
192
193 # for "historical portability":
193 # for "historical portability":
194 # use locally defined empty option list, if formatteropts isn't
194 # use locally defined empty option list, if formatteropts isn't
195 # available, because commands.formatteropts has been available since
195 # available, because commands.formatteropts has been available since
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 # available since 2.2 (or ae5f92e154d3)
197 # available since 2.2 (or ae5f92e154d3)
198 formatteropts = getattr(
198 formatteropts = getattr(
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 )
200 )
201
201
202 # for "historical portability":
202 # for "historical portability":
203 # use locally defined option list, if debugrevlogopts isn't available,
203 # use locally defined option list, if debugrevlogopts isn't available,
204 # because commands.debugrevlogopts has been available since 3.7 (or
204 # because commands.debugrevlogopts has been available since 3.7 (or
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 # since 1.9 (or a79fea6b3e77).
206 # since 1.9 (or a79fea6b3e77).
207 revlogopts = getattr(
207 revlogopts = getattr(
208 cmdutil,
208 cmdutil,
209 "debugrevlogopts",
209 "debugrevlogopts",
210 getattr(
210 getattr(
211 commands,
211 commands,
212 "debugrevlogopts",
212 "debugrevlogopts",
213 [
213 [
214 (b'c', b'changelog', False, b'open changelog'),
214 (b'c', b'changelog', False, b'open changelog'),
215 (b'm', b'manifest', False, b'open manifest'),
215 (b'm', b'manifest', False, b'open manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
217 ],
217 ],
218 ),
218 ),
219 )
219 )
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b"name|alias1|alias2" command spec into a list of names."""
    separator = b"|"
    return cmd.split(separator)
228
228
229
229
# Pick the best available "@command" registration mechanism, newest first.
if safehasattr(registrar, 'command'):
    # modern path: registrar.command, since 3.7
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending the aliases to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # register directly into cmdtable, with or without synopsis
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
# Register the experimental perf.* config options with Mercurial's
# registrar when available; silently skip on versions that lack it.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # older configitem() does not accept the experimental= keyword, so
    # re-register everything without it
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335
335
336
336
def getlen(ui):
    """Return a length function: constant 1 under perf.stub, else len."""
    stub = ui.configbool(b"perf", b"stub", False)
    if not stub:
        return len

    def _one(sequence):
        # stub mode pretends every collection has a single element
        return 1

    return _one
341
341
342
342
class noop(object):
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None


# shared do-nothing context manager instance
NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # a plain formatter is falsy, matching plainformatter
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # parse `<time>-<numberofrun>` pairs; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # only available when the profiling module imported successfully
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    # experimental config: perf.pre-run (warm-up runs before measuring)
    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once, after the optional *setup* callable.

    Replacement for _timer under perf.stub; *fm* and *title* are accepted
    only for interface compatibility and are unused.
    """
    if setup is not None:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) sample on exit."""
    sample = []
    os_before = os.times()
    clock_before = util.timer()
    yield sample
    clock_after = util.timer()
    os_after = os.times()
    wall = clock_after - clock_before
    user_delta = os_after[0] - os_before[0]
    sys_delta = os_after[1] - os_before[1]
    sample.append((wall, user_delta, sys_delta))
497
497
498
498
# list of stop condition (elapsed time, minimal run count)
# a benchmark stops once it has run for <elapsed> seconds AND completed
# at least <count> iterations (matches `perf.run-limits` semantics)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly time *func* and report the results through formatter *fm*.

    *setup* (if given) runs before every invocation of *func*, including
    warm-up runs.  *prerun* warm-up invocations are performed without
    measurement.  The loop stops when any (elapsed, mincount) pair in
    *limits* is satisfied.  Only the first measured iteration runs under
    *profiler*; subsequent ones use the no-op context.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs: not timed, not recorded
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only profile the first iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place.  The best sample is always shown; with *displayall*, the max,
    average and median samples are shown as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _emit(role, entry):
        # every role except b'best' gets a "role." field-name prefix
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        wall, user, system = entry[0], entry[1], entry[2]
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _emit(b'best', timings[0])
    if displayall:
        _emit(b'max', timings[-1])
        averaged = tuple([sum(column) / count for column in zip(*timings)])
        _emit(b'avg', averaged)
        _emit(b'median', timings[len(timings) // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read config option *section*.*name* as an int, or *default* if unset.

    Raises error.ConfigError when the configured value is not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can undo any set()
    saved = getattr(obj, _sysstr(name))

    class _attrproxy(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), saved)

    return _attrproxy()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
def getbranchmapsubsettable():
    """Locate `subsettable` wherever the running Mercurial defines it."""
    # for "historical portability": subsettable has lived in
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    storevfs = getattr(repo, 'svfs', None)
    if storevfs:
        return storevfs
    # older Mercurial exposed the store opener as `sopener`
    return getattr(repo, 'sopener')
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    repovfs = getattr(repo, 'vfs', None)
    if repovfs:
        return repovfs
    # older Mercurial exposed the .hg opener as `opener`
    return getattr(repo, 'opener')
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    tagsattr = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if tagsattr:  # since 1.4 (or 5614a628d173)
        def cleartags():
            tagsattr.set(None)

        return cleartags

    tagscacheattr = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if tagscacheattr:  # since 0.6 (or d7df759d0e97)
        def cleartagscache():
            tagscacheattr.set(None)

        return cleartagscache

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
def clearfilecache(obj, attrname):
    """Drop the file-backed property 'attrname' from 'obj'.

    Works on repos (operating on the unfiltered view) as well as on any
    object carrying a `_filecache` dict.
    """
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        # filecache entries live on the unfiltered repository
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    """Force the changelog of 'repo' to be reloaded from disk."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # filtered repos keep their own changelog cache key/value pair
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark a full dirstate walk (tracked + unknown, ignored excluded)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def d():
        return len(
            list(
                repo.dirstate.walk(
                    matcher, subrepos=[], unknown=True, ignored=False
                )
            )
        )

    timer(d)
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file `f` at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    filectx = repo[b'.'][f]

    def d():
        return len(filectx.annotate(True))

    timer(d)
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    askunknown = opts[b'unknown']

    def d():
        return sum(len(entries) for entries in repo.status(unknown=askunknown))

    timer(d)
    fm.end()
781
781
782
782
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the full working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture the quiet level *before* entering the try block: if the
    # assignment lived inside the try and the read itself raised, the
    # finally clause would hit a NameError and mask the real exception
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry run so the benchmark never mutates the repository
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # modern API (5.0+) takes an extra uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
800
800
801
801
def clearcaches(cl):
    """Drop the lookup caches of a revlog/changelog instance."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
        return
    if util.safehasattr(cl, b'_nodecache'):
        # older API: reset the node cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
811
811
812
812
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def resetcaches():
        # each run starts from cold revlog caches
        clearcaches(cl)

    def runone():
        len(cl.headrevs())

    timer(runone, setup=resetcaches)
    fm.end()
828
828
829
829
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    # benchmark computing the repository tags, dropping the tags cache
    # (and optionally the changelog/manifest caches) before each run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def prepare():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def runone():
        return len(repo.tags())

    timer(runone, setup=prepare)
    fm.end()
852
852
853
853
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark a full iteration over the ancestors of all current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for _ancestor in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
866
866
867
867
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of REVSET revisions against the lazy
    # ancestor set of all heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        ancestorset = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestorset

    timer(d)
    fm.end()
882
882
883
883
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # repos[1] is (re)filled with a fresh peer by the setup function
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def connect():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=connect)
    fm.end()
900
900
901
901
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def prepare():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=prepare)
    fm.end()
924
924
925
925
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # wrap fn so each run re-opens and re-parses the bundle from disk
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # drain the parsed bundle in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads in `size`-byte chunks, bypassing bundle parsing;
        # serves as the I/O baseline for the other benchmarks
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # drain every bundle2 part in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # baseline raw-read benchmarks, always run regardless of bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # parse the bundle once only to detect its type and pick the
    # matching set of benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1050
1050
1051
1051
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # exhaust the generator so the changelog chunks are actually built
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1087
1087
1088
1088
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # benchmark dirstate.hasdir with the `_dirs` cache rebuilt every run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # make sure the dirstate is loaded before timing

    def d():
        dirstate.hasdir(b'a')
        # drop the directory cache so the next run recomputes it
        del dirstate._map._dirs

    timer(d)
    fm.end()
1102
1102
1103
1103
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmap the time necessary to load a dirstate from scratch

    The dirstate is loaded to the point were a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # warm everything up once before measuring

    def invalidate():
        repo.dirstate.invalidate()

    def d():
        b"a" in repo.dirstate

    timer(d, setup=invalidate)
    fm.end()
1123
1123
1124
1124
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")  # make sure the dirstate map is loaded

    def dropdirs():
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=dropdirs)
    fm.end()
1141
1141
1142
1142
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.filefoldmap.get(b'a')  # prime the map once

    def dropfoldmap():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=dropfoldmap)
    fm.end()
1162
1162
1163
1163
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.dirfoldmap.get(b'a')  # prime the map once

    def dropcaches():
        # dirfoldmap is derived from _dirs, so drop both
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=dropcaches)
    fm.end()
1184
1184
1185
1185
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmap the time it take to write a dirstate on disk
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # make sure the dirstate is loaded before timing

    def setup():
        # marking the dirstate dirty is a pre-condition for the write, not
        # part of the work being measured, so it belongs in the `setup`
        # function rather than inside the timed function
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1201
1203
1202
1204
1203 def _getmergerevs(repo, opts):
1205 def _getmergerevs(repo, opts):
1204 """parse command argument to return rev involved in merge
1206 """parse command argument to return rev involved in merge
1205
1207
1206 input: options dictionnary with `rev`, `from` and `bse`
1208 input: options dictionnary with `rev`, `from` and `bse`
1207 output: (localctx, otherctx, basectx)
1209 output: (localctx, otherctx, basectx)
1208 """
1210 """
1209 if opts[b'from']:
1211 if opts[b'from']:
1210 fromrev = scmutil.revsingle(repo, opts[b'from'])
1212 fromrev = scmutil.revsingle(repo, opts[b'from'])
1211 wctx = repo[fromrev]
1213 wctx = repo[fromrev]
1212 else:
1214 else:
1213 wctx = repo[None]
1215 wctx = repo[None]
1214 # we don't want working dir files to be stat'd in the benchmark, so
1216 # we don't want working dir files to be stat'd in the benchmark, so
1215 # prime that cache
1217 # prime that cache
1216 wctx.dirty()
1218 wctx.dirty()
1217 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1219 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1218 if opts[b'base']:
1220 if opts[b'base']:
1219 fromrev = scmutil.revsingle(repo, opts[b'base'])
1221 fromrev = scmutil.revsingle(repo, opts[b'base'])
1220 ancestor = repo[fromrev]
1222 ancestor = repo[fromrev]
1221 else:
1223 else:
1222 ancestor = wctx.ancestor(rctx)
1224 ancestor = wctx.ancestor(rctx)
1223 return (wctx, rctx, ancestor)
1225 return (wctx, rctx, ancestor)
1224
1226
1225
1227
1226 @command(
1228 @command(
1227 b'perfmergecalculate',
1229 b'perfmergecalculate',
1228 [
1230 [
1229 (b'r', b'rev', b'.', b'rev to merge against'),
1231 (b'r', b'rev', b'.', b'rev to merge against'),
1230 (b'', b'from', b'', b'rev to merge from'),
1232 (b'', b'from', b'', b'rev to merge from'),
1231 (b'', b'base', b'', b'the revision to use as base'),
1233 (b'', b'base', b'', b'the revision to use as base'),
1232 ]
1234 ]
1233 + formatteropts,
1235 + formatteropts,
1234 )
1236 )
1235 def perfmergecalculate(ui, repo, **opts):
1237 def perfmergecalculate(ui, repo, **opts):
1236 opts = _byteskwargs(opts)
1238 opts = _byteskwargs(opts)
1237 timer, fm = gettimer(ui, opts)
1239 timer, fm = gettimer(ui, opts)
1238
1240
1239 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1241 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1240
1242
1241 def d():
1243 def d():
1242 # acceptremote is True because we don't want prompts in the middle of
1244 # acceptremote is True because we don't want prompts in the middle of
1243 # our benchmark
1245 # our benchmark
1244 merge.calculateupdates(
1246 merge.calculateupdates(
1245 repo,
1247 repo,
1246 wctx,
1248 wctx,
1247 rctx,
1249 rctx,
1248 [ancestor],
1250 [ancestor],
1249 branchmerge=False,
1251 branchmerge=False,
1250 force=False,
1252 force=False,
1251 acceptremote=True,
1253 acceptremote=True,
1252 followcopies=True,
1254 followcopies=True,
1253 )
1255 )
1254
1256
1255 timer(d)
1257 timer(d)
1256 fm.end()
1258 fm.end()
1257
1259
1258
1260
1259 @command(
1261 @command(
1260 b'perfmergecopies',
1262 b'perfmergecopies',
1261 [
1263 [
1262 (b'r', b'rev', b'.', b'rev to merge against'),
1264 (b'r', b'rev', b'.', b'rev to merge against'),
1263 (b'', b'from', b'', b'rev to merge from'),
1265 (b'', b'from', b'', b'rev to merge from'),
1264 (b'', b'base', b'', b'the revision to use as base'),
1266 (b'', b'base', b'', b'the revision to use as base'),
1265 ]
1267 ]
1266 + formatteropts,
1268 + formatteropts,
1267 )
1269 )
1268 def perfmergecopies(ui, repo, **opts):
1270 def perfmergecopies(ui, repo, **opts):
1269 """measure runtime of `copies.mergecopies`"""
1271 """measure runtime of `copies.mergecopies`"""
1270 opts = _byteskwargs(opts)
1272 opts = _byteskwargs(opts)
1271 timer, fm = gettimer(ui, opts)
1273 timer, fm = gettimer(ui, opts)
1272 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1274 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1273
1275
1274 def d():
1276 def d():
1275 # acceptremote is True because we don't want prompts in the middle of
1277 # acceptremote is True because we don't want prompts in the middle of
1276 # our benchmark
1278 # our benchmark
1277 copies.mergecopies(repo, wctx, rctx, ancestor)
1279 copies.mergecopies(repo, wctx, rctx, ancestor)
1278
1280
1279 timer(d)
1281 timer(d)
1280 fm.end()
1282 fm.end()
1281
1283
1282
1284
1283 @command(b'perfpathcopies', [], b"REV REV")
1285 @command(b'perfpathcopies', [], b"REV REV")
1284 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1286 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1285 """benchmark the copy tracing logic"""
1287 """benchmark the copy tracing logic"""
1286 opts = _byteskwargs(opts)
1288 opts = _byteskwargs(opts)
1287 timer, fm = gettimer(ui, opts)
1289 timer, fm = gettimer(ui, opts)
1288 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1290 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1289 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1291 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1290
1292
1291 def d():
1293 def d():
1292 copies.pathcopies(ctx1, ctx2)
1294 copies.pathcopies(ctx1, ctx2)
1293
1295
1294 timer(d)
1296 timer(d)
1295 fm.end()
1297 fm.end()
1296
1298
1297
1299
1298 @command(
1300 @command(
1299 b'perfphases',
1301 b'perfphases',
1300 [(b'', b'full', False, b'include file reading time too'),],
1302 [(b'', b'full', False, b'include file reading time too'),],
1301 b"",
1303 b"",
1302 )
1304 )
1303 def perfphases(ui, repo, **opts):
1305 def perfphases(ui, repo, **opts):
1304 """benchmark phasesets computation"""
1306 """benchmark phasesets computation"""
1305 opts = _byteskwargs(opts)
1307 opts = _byteskwargs(opts)
1306 timer, fm = gettimer(ui, opts)
1308 timer, fm = gettimer(ui, opts)
1307 _phases = repo._phasecache
1309 _phases = repo._phasecache
1308 full = opts.get(b'full')
1310 full = opts.get(b'full')
1309
1311
1310 def d():
1312 def d():
1311 phases = _phases
1313 phases = _phases
1312 if full:
1314 if full:
1313 clearfilecache(repo, b'_phasecache')
1315 clearfilecache(repo, b'_phasecache')
1314 phases = repo._phasecache
1316 phases = repo._phasecache
1315 phases.invalidate()
1317 phases.invalidate()
1316 phases.loadphaserevs(repo)
1318 phases.loadphaserevs(repo)
1317
1319
1318 timer(d)
1320 timer(d)
1319 fm.end()
1321 fm.end()
1320
1322
1321
1323
1322 @command(b'perfphasesremote', [], b"[DEST]")
1324 @command(b'perfphasesremote', [], b"[DEST]")
1323 def perfphasesremote(ui, repo, dest=None, **opts):
1325 def perfphasesremote(ui, repo, dest=None, **opts):
1324 """benchmark time needed to analyse phases of the remote server"""
1326 """benchmark time needed to analyse phases of the remote server"""
1325 from mercurial.node import bin
1327 from mercurial.node import bin
1326 from mercurial import (
1328 from mercurial import (
1327 exchange,
1329 exchange,
1328 hg,
1330 hg,
1329 phases,
1331 phases,
1330 )
1332 )
1331
1333
1332 opts = _byteskwargs(opts)
1334 opts = _byteskwargs(opts)
1333 timer, fm = gettimer(ui, opts)
1335 timer, fm = gettimer(ui, opts)
1334
1336
1335 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1337 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1336 if not path:
1338 if not path:
1337 raise error.Abort(
1339 raise error.Abort(
1338 b'default repository not configured!',
1340 b'default repository not configured!',
1339 hint=b"see 'hg help config.paths'",
1341 hint=b"see 'hg help config.paths'",
1340 )
1342 )
1341 dest = path.pushloc or path.loc
1343 dest = path.pushloc or path.loc
1342 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1344 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1343 other = hg.peer(repo, opts, dest)
1345 other = hg.peer(repo, opts, dest)
1344
1346
1345 # easier to perform discovery through the operation
1347 # easier to perform discovery through the operation
1346 op = exchange.pushoperation(repo, other)
1348 op = exchange.pushoperation(repo, other)
1347 exchange._pushdiscoverychangeset(op)
1349 exchange._pushdiscoverychangeset(op)
1348
1350
1349 remotesubset = op.fallbackheads
1351 remotesubset = op.fallbackheads
1350
1352
1351 with other.commandexecutor() as e:
1353 with other.commandexecutor() as e:
1352 remotephases = e.callcommand(
1354 remotephases = e.callcommand(
1353 b'listkeys', {b'namespace': b'phases'}
1355 b'listkeys', {b'namespace': b'phases'}
1354 ).result()
1356 ).result()
1355 del other
1357 del other
1356 publishing = remotephases.get(b'publishing', False)
1358 publishing = remotephases.get(b'publishing', False)
1357 if publishing:
1359 if publishing:
1358 ui.statusnoi18n(b'publishing: yes\n')
1360 ui.statusnoi18n(b'publishing: yes\n')
1359 else:
1361 else:
1360 ui.statusnoi18n(b'publishing: no\n')
1362 ui.statusnoi18n(b'publishing: no\n')
1361
1363
1362 nodemap = repo.changelog.nodemap
1364 nodemap = repo.changelog.nodemap
1363 nonpublishroots = 0
1365 nonpublishroots = 0
1364 for nhex, phase in remotephases.iteritems():
1366 for nhex, phase in remotephases.iteritems():
1365 if nhex == b'publishing': # ignore data related to publish option
1367 if nhex == b'publishing': # ignore data related to publish option
1366 continue
1368 continue
1367 node = bin(nhex)
1369 node = bin(nhex)
1368 if node in nodemap and int(phase):
1370 if node in nodemap and int(phase):
1369 nonpublishroots += 1
1371 nonpublishroots += 1
1370 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1372 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1371 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1373 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1372
1374
1373 def d():
1375 def d():
1374 phases.remotephasessummary(repo, remotesubset, remotephases)
1376 phases.remotephasessummary(repo, remotesubset, remotephases)
1375
1377
1376 timer(d)
1378 timer(d)
1377 fm.end()
1379 fm.end()
1378
1380
1379
1381
1380 @command(
1382 @command(
1381 b'perfmanifest',
1383 b'perfmanifest',
1382 [
1384 [
1383 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1385 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1384 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1386 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1385 ]
1387 ]
1386 + formatteropts,
1388 + formatteropts,
1387 b'REV|NODE',
1389 b'REV|NODE',
1388 )
1390 )
1389 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1391 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1390 """benchmark the time to read a manifest from disk and return a usable
1392 """benchmark the time to read a manifest from disk and return a usable
1391 dict-like object
1393 dict-like object
1392
1394
1393 Manifest caches are cleared before retrieval."""
1395 Manifest caches are cleared before retrieval."""
1394 opts = _byteskwargs(opts)
1396 opts = _byteskwargs(opts)
1395 timer, fm = gettimer(ui, opts)
1397 timer, fm = gettimer(ui, opts)
1396 if not manifest_rev:
1398 if not manifest_rev:
1397 ctx = scmutil.revsingle(repo, rev, rev)
1399 ctx = scmutil.revsingle(repo, rev, rev)
1398 t = ctx.manifestnode()
1400 t = ctx.manifestnode()
1399 else:
1401 else:
1400 from mercurial.node import bin
1402 from mercurial.node import bin
1401
1403
1402 if len(rev) == 40:
1404 if len(rev) == 40:
1403 t = bin(rev)
1405 t = bin(rev)
1404 else:
1406 else:
1405 try:
1407 try:
1406 rev = int(rev)
1408 rev = int(rev)
1407
1409
1408 if util.safehasattr(repo.manifestlog, b'getstorage'):
1410 if util.safehasattr(repo.manifestlog, b'getstorage'):
1409 t = repo.manifestlog.getstorage(b'').node(rev)
1411 t = repo.manifestlog.getstorage(b'').node(rev)
1410 else:
1412 else:
1411 t = repo.manifestlog._revlog.lookup(rev)
1413 t = repo.manifestlog._revlog.lookup(rev)
1412 except ValueError:
1414 except ValueError:
1413 raise error.Abort(
1415 raise error.Abort(
1414 b'manifest revision must be integer or full node'
1416 b'manifest revision must be integer or full node'
1415 )
1417 )
1416
1418
1417 def d():
1419 def d():
1418 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1420 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1419 repo.manifestlog[t].read()
1421 repo.manifestlog[t].read()
1420
1422
1421 timer(d)
1423 timer(d)
1422 fm.end()
1424 fm.end()
1423
1425
1424
1426
1425 @command(b'perfchangeset', formatteropts)
1427 @command(b'perfchangeset', formatteropts)
1426 def perfchangeset(ui, repo, rev, **opts):
1428 def perfchangeset(ui, repo, rev, **opts):
1427 opts = _byteskwargs(opts)
1429 opts = _byteskwargs(opts)
1428 timer, fm = gettimer(ui, opts)
1430 timer, fm = gettimer(ui, opts)
1429 n = scmutil.revsingle(repo, rev).node()
1431 n = scmutil.revsingle(repo, rev).node()
1430
1432
1431 def d():
1433 def d():
1432 repo.changelog.read(n)
1434 repo.changelog.read(n)
1433 # repo.changelog._cache = None
1435 # repo.changelog._cache = None
1434
1436
1435 timer(d)
1437 timer(d)
1436 fm.end()
1438 fm.end()
1437
1439
1438
1440
1439 @command(b'perfignore', formatteropts)
1441 @command(b'perfignore', formatteropts)
1440 def perfignore(ui, repo, **opts):
1442 def perfignore(ui, repo, **opts):
1441 """benchmark operation related to computing ignore"""
1443 """benchmark operation related to computing ignore"""
1442 opts = _byteskwargs(opts)
1444 opts = _byteskwargs(opts)
1443 timer, fm = gettimer(ui, opts)
1445 timer, fm = gettimer(ui, opts)
1444 dirstate = repo.dirstate
1446 dirstate = repo.dirstate
1445
1447
1446 def setupone():
1448 def setupone():
1447 dirstate.invalidate()
1449 dirstate.invalidate()
1448 clearfilecache(dirstate, b'_ignore')
1450 clearfilecache(dirstate, b'_ignore')
1449
1451
1450 def runone():
1452 def runone():
1451 dirstate._ignore
1453 dirstate._ignore
1452
1454
1453 timer(runone, setup=setupone, title=b"load")
1455 timer(runone, setup=setupone, title=b"load")
1454 fm.end()
1456 fm.end()
1455
1457
1456
1458
1457 @command(
1459 @command(
1458 b'perfindex',
1460 b'perfindex',
1459 [
1461 [
1460 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1462 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1461 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1463 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1462 ]
1464 ]
1463 + formatteropts,
1465 + formatteropts,
1464 )
1466 )
1465 def perfindex(ui, repo, **opts):
1467 def perfindex(ui, repo, **opts):
1466 """benchmark index creation time followed by a lookup
1468 """benchmark index creation time followed by a lookup
1467
1469
1468 The default is to look `tip` up. Depending on the index implementation,
1470 The default is to look `tip` up. Depending on the index implementation,
1469 the revision looked up can matters. For example, an implementation
1471 the revision looked up can matters. For example, an implementation
1470 scanning the index will have a faster lookup time for `--rev tip` than for
1472 scanning the index will have a faster lookup time for `--rev tip` than for
1471 `--rev 0`. The number of looked up revisions and their order can also
1473 `--rev 0`. The number of looked up revisions and their order can also
1472 matters.
1474 matters.
1473
1475
1474 Example of useful set to test:
1476 Example of useful set to test:
1475 * tip
1477 * tip
1476 * 0
1478 * 0
1477 * -10:
1479 * -10:
1478 * :10
1480 * :10
1479 * -10: + :10
1481 * -10: + :10
1480 * :10: + -10:
1482 * :10: + -10:
1481 * -10000:
1483 * -10000:
1482 * -10000: + 0
1484 * -10000: + 0
1483
1485
1484 It is not currently possible to check for lookup of a missing node. For
1486 It is not currently possible to check for lookup of a missing node. For
1485 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1487 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1486 import mercurial.revlog
1488 import mercurial.revlog
1487
1489
1488 opts = _byteskwargs(opts)
1490 opts = _byteskwargs(opts)
1489 timer, fm = gettimer(ui, opts)
1491 timer, fm = gettimer(ui, opts)
1490 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1492 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1491 if opts[b'no_lookup']:
1493 if opts[b'no_lookup']:
1492 if opts['rev']:
1494 if opts['rev']:
1493 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1495 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1494 nodes = []
1496 nodes = []
1495 elif not opts[b'rev']:
1497 elif not opts[b'rev']:
1496 nodes = [repo[b"tip"].node()]
1498 nodes = [repo[b"tip"].node()]
1497 else:
1499 else:
1498 revs = scmutil.revrange(repo, opts[b'rev'])
1500 revs = scmutil.revrange(repo, opts[b'rev'])
1499 cl = repo.changelog
1501 cl = repo.changelog
1500 nodes = [cl.node(r) for r in revs]
1502 nodes = [cl.node(r) for r in revs]
1501
1503
1502 unfi = repo.unfiltered()
1504 unfi = repo.unfiltered()
1503 # find the filecache func directly
1505 # find the filecache func directly
1504 # This avoid polluting the benchmark with the filecache logic
1506 # This avoid polluting the benchmark with the filecache logic
1505 makecl = unfi.__class__.changelog.func
1507 makecl = unfi.__class__.changelog.func
1506
1508
1507 def setup():
1509 def setup():
1508 # probably not necessary, but for good measure
1510 # probably not necessary, but for good measure
1509 clearchangelog(unfi)
1511 clearchangelog(unfi)
1510
1512
1511 def d():
1513 def d():
1512 cl = makecl(unfi)
1514 cl = makecl(unfi)
1513 for n in nodes:
1515 for n in nodes:
1514 cl.rev(n)
1516 cl.rev(n)
1515
1517
1516 timer(d, setup=setup)
1518 timer(d, setup=setup)
1517 fm.end()
1519 fm.end()
1518
1520
1519
1521
1520 @command(
1522 @command(
1521 b'perfnodemap',
1523 b'perfnodemap',
1522 [
1524 [
1523 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1525 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1524 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1526 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1525 ]
1527 ]
1526 + formatteropts,
1528 + formatteropts,
1527 )
1529 )
1528 def perfnodemap(ui, repo, **opts):
1530 def perfnodemap(ui, repo, **opts):
1529 """benchmark the time necessary to look up revision from a cold nodemap
1531 """benchmark the time necessary to look up revision from a cold nodemap
1530
1532
1531 Depending on the implementation, the amount and order of revision we look
1533 Depending on the implementation, the amount and order of revision we look
1532 up can varies. Example of useful set to test:
1534 up can varies. Example of useful set to test:
1533 * tip
1535 * tip
1534 * 0
1536 * 0
1535 * -10:
1537 * -10:
1536 * :10
1538 * :10
1537 * -10: + :10
1539 * -10: + :10
1538 * :10: + -10:
1540 * :10: + -10:
1539 * -10000:
1541 * -10000:
1540 * -10000: + 0
1542 * -10000: + 0
1541
1543
1542 The command currently focus on valid binary lookup. Benchmarking for
1544 The command currently focus on valid binary lookup. Benchmarking for
1543 hexlookup, prefix lookup and missing lookup would also be valuable.
1545 hexlookup, prefix lookup and missing lookup would also be valuable.
1544 """
1546 """
1545 import mercurial.revlog
1547 import mercurial.revlog
1546
1548
1547 opts = _byteskwargs(opts)
1549 opts = _byteskwargs(opts)
1548 timer, fm = gettimer(ui, opts)
1550 timer, fm = gettimer(ui, opts)
1549 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1551 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1550
1552
1551 unfi = repo.unfiltered()
1553 unfi = repo.unfiltered()
1552 clearcaches = opts['clear_caches']
1554 clearcaches = opts['clear_caches']
1553 # find the filecache func directly
1555 # find the filecache func directly
1554 # This avoid polluting the benchmark with the filecache logic
1556 # This avoid polluting the benchmark with the filecache logic
1555 makecl = unfi.__class__.changelog.func
1557 makecl = unfi.__class__.changelog.func
1556 if not opts[b'rev']:
1558 if not opts[b'rev']:
1557 raise error.Abort('use --rev to specify revisions to look up')
1559 raise error.Abort('use --rev to specify revisions to look up')
1558 revs = scmutil.revrange(repo, opts[b'rev'])
1560 revs = scmutil.revrange(repo, opts[b'rev'])
1559 cl = repo.changelog
1561 cl = repo.changelog
1560 nodes = [cl.node(r) for r in revs]
1562 nodes = [cl.node(r) for r in revs]
1561
1563
1562 # use a list to pass reference to a nodemap from one closure to the next
1564 # use a list to pass reference to a nodemap from one closure to the next
1563 nodeget = [None]
1565 nodeget = [None]
1564
1566
1565 def setnodeget():
1567 def setnodeget():
1566 # probably not necessary, but for good measure
1568 # probably not necessary, but for good measure
1567 clearchangelog(unfi)
1569 clearchangelog(unfi)
1568 nodeget[0] = makecl(unfi).nodemap.get
1570 nodeget[0] = makecl(unfi).nodemap.get
1569
1571
1570 def d():
1572 def d():
1571 get = nodeget[0]
1573 get = nodeget[0]
1572 for n in nodes:
1574 for n in nodes:
1573 get(n)
1575 get(n)
1574
1576
1575 setup = None
1577 setup = None
1576 if clearcaches:
1578 if clearcaches:
1577
1579
1578 def setup():
1580 def setup():
1579 setnodeget()
1581 setnodeget()
1580
1582
1581 else:
1583 else:
1582 setnodeget()
1584 setnodeget()
1583 d() # prewarm the data structure
1585 d() # prewarm the data structure
1584 timer(d, setup=setup)
1586 timer(d, setup=setup)
1585 fm.end()
1587 fm.end()
1586
1588
1587
1589
1588 @command(b'perfstartup', formatteropts)
1590 @command(b'perfstartup', formatteropts)
1589 def perfstartup(ui, repo, **opts):
1591 def perfstartup(ui, repo, **opts):
1590 opts = _byteskwargs(opts)
1592 opts = _byteskwargs(opts)
1591 timer, fm = gettimer(ui, opts)
1593 timer, fm = gettimer(ui, opts)
1592
1594
1593 def d():
1595 def d():
1594 if os.name != r'nt':
1596 if os.name != r'nt':
1595 os.system(
1597 os.system(
1596 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1598 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1597 )
1599 )
1598 else:
1600 else:
1599 os.environ[r'HGRCPATH'] = r' '
1601 os.environ[r'HGRCPATH'] = r' '
1600 os.system(r"%s version -q > NUL" % sys.argv[0])
1602 os.system(r"%s version -q > NUL" % sys.argv[0])
1601
1603
1602 timer(d)
1604 timer(d)
1603 fm.end()
1605 fm.end()
1604
1606
1605
1607
1606 @command(b'perfparents', formatteropts)
1608 @command(b'perfparents', formatteropts)
1607 def perfparents(ui, repo, **opts):
1609 def perfparents(ui, repo, **opts):
1608 """benchmark the time necessary to fetch one changeset's parents.
1610 """benchmark the time necessary to fetch one changeset's parents.
1609
1611
1610 The fetch is done using the `node identifier`, traversing all object layers
1612 The fetch is done using the `node identifier`, traversing all object layers
1611 from the repository object. The first N revisions will be used for this
1613 from the repository object. The first N revisions will be used for this
1612 benchmark. N is controlled by the ``perf.parentscount`` config option
1614 benchmark. N is controlled by the ``perf.parentscount`` config option
1613 (default: 1000).
1615 (default: 1000).
1614 """
1616 """
1615 opts = _byteskwargs(opts)
1617 opts = _byteskwargs(opts)
1616 timer, fm = gettimer(ui, opts)
1618 timer, fm = gettimer(ui, opts)
1617 # control the number of commits perfparents iterates over
1619 # control the number of commits perfparents iterates over
1618 # experimental config: perf.parentscount
1620 # experimental config: perf.parentscount
1619 count = getint(ui, b"perf", b"parentscount", 1000)
1621 count = getint(ui, b"perf", b"parentscount", 1000)
1620 if len(repo.changelog) < count:
1622 if len(repo.changelog) < count:
1621 raise error.Abort(b"repo needs %d commits for this test" % count)
1623 raise error.Abort(b"repo needs %d commits for this test" % count)
1622 repo = repo.unfiltered()
1624 repo = repo.unfiltered()
1623 nl = [repo.changelog.node(i) for i in _xrange(count)]
1625 nl = [repo.changelog.node(i) for i in _xrange(count)]
1624
1626
1625 def d():
1627 def d():
1626 for n in nl:
1628 for n in nl:
1627 repo.changelog.parents(n)
1629 repo.changelog.parents(n)
1628
1630
1629 timer(d)
1631 timer(d)
1630 fm.end()
1632 fm.end()
1631
1633
1632
1634
1633 @command(b'perfctxfiles', formatteropts)
1635 @command(b'perfctxfiles', formatteropts)
1634 def perfctxfiles(ui, repo, x, **opts):
1636 def perfctxfiles(ui, repo, x, **opts):
1635 opts = _byteskwargs(opts)
1637 opts = _byteskwargs(opts)
1636 x = int(x)
1638 x = int(x)
1637 timer, fm = gettimer(ui, opts)
1639 timer, fm = gettimer(ui, opts)
1638
1640
1639 def d():
1641 def d():
1640 len(repo[x].files())
1642 len(repo[x].files())
1641
1643
1642 timer(d)
1644 timer(d)
1643 fm.end()
1645 fm.end()
1644
1646
1645
1647
1646 @command(b'perfrawfiles', formatteropts)
1648 @command(b'perfrawfiles', formatteropts)
1647 def perfrawfiles(ui, repo, x, **opts):
1649 def perfrawfiles(ui, repo, x, **opts):
1648 opts = _byteskwargs(opts)
1650 opts = _byteskwargs(opts)
1649 x = int(x)
1651 x = int(x)
1650 timer, fm = gettimer(ui, opts)
1652 timer, fm = gettimer(ui, opts)
1651 cl = repo.changelog
1653 cl = repo.changelog
1652
1654
1653 def d():
1655 def d():
1654 len(cl.read(x)[3])
1656 len(cl.read(x)[3])
1655
1657
1656 timer(d)
1658 timer(d)
1657 fm.end()
1659 fm.end()
1658
1660
1659
1661
1660 @command(b'perflookup', formatteropts)
1662 @command(b'perflookup', formatteropts)
1661 def perflookup(ui, repo, rev, **opts):
1663 def perflookup(ui, repo, rev, **opts):
1662 opts = _byteskwargs(opts)
1664 opts = _byteskwargs(opts)
1663 timer, fm = gettimer(ui, opts)
1665 timer, fm = gettimer(ui, opts)
1664 timer(lambda: len(repo.lookup(rev)))
1666 timer(lambda: len(repo.lookup(rev)))
1665 fm.end()
1667 fm.end()
1666
1668
1667
1669
1668 @command(
1670 @command(
1669 b'perflinelogedits',
1671 b'perflinelogedits',
1670 [
1672 [
1671 (b'n', b'edits', 10000, b'number of edits'),
1673 (b'n', b'edits', 10000, b'number of edits'),
1672 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1674 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1673 ],
1675 ],
1674 norepo=True,
1676 norepo=True,
1675 )
1677 )
1676 def perflinelogedits(ui, **opts):
1678 def perflinelogedits(ui, **opts):
1677 from mercurial import linelog
1679 from mercurial import linelog
1678
1680
1679 opts = _byteskwargs(opts)
1681 opts = _byteskwargs(opts)
1680
1682
1681 edits = opts[b'edits']
1683 edits = opts[b'edits']
1682 maxhunklines = opts[b'max_hunk_lines']
1684 maxhunklines = opts[b'max_hunk_lines']
1683
1685
1684 maxb1 = 100000
1686 maxb1 = 100000
1685 random.seed(0)
1687 random.seed(0)
1686 randint = random.randint
1688 randint = random.randint
1687 currentlines = 0
1689 currentlines = 0
1688 arglist = []
1690 arglist = []
1689 for rev in _xrange(edits):
1691 for rev in _xrange(edits):
1690 a1 = randint(0, currentlines)
1692 a1 = randint(0, currentlines)
1691 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1693 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1692 b1 = randint(0, maxb1)
1694 b1 = randint(0, maxb1)
1693 b2 = randint(b1, b1 + maxhunklines)
1695 b2 = randint(b1, b1 + maxhunklines)
1694 currentlines += (b2 - b1) - (a2 - a1)
1696 currentlines += (b2 - b1) - (a2 - a1)
1695 arglist.append((rev, a1, a2, b1, b2))
1697 arglist.append((rev, a1, a2, b1, b2))
1696
1698
1697 def d():
1699 def d():
1698 ll = linelog.linelog()
1700 ll = linelog.linelog()
1699 for args in arglist:
1701 for args in arglist:
1700 ll.replacelines(*args)
1702 ll.replacelines(*args)
1701
1703
1702 timer, fm = gettimer(ui, opts)
1704 timer, fm = gettimer(ui, opts)
1703 timer(d)
1705 timer(d)
1704 fm.end()
1706 fm.end()
1705
1707
1706
1708
1707 @command(b'perfrevrange', formatteropts)
1709 @command(b'perfrevrange', formatteropts)
1708 def perfrevrange(ui, repo, *specs, **opts):
1710 def perfrevrange(ui, repo, *specs, **opts):
1709 opts = _byteskwargs(opts)
1711 opts = _byteskwargs(opts)
1710 timer, fm = gettimer(ui, opts)
1712 timer, fm = gettimer(ui, opts)
1711 revrange = scmutil.revrange
1713 revrange = scmutil.revrange
1712 timer(lambda: len(revrange(repo, specs)))
1714 timer(lambda: len(revrange(repo, specs)))
1713 fm.end()
1715 fm.end()
1714
1716
1715
1717
1716 @command(b'perfnodelookup', formatteropts)
1718 @command(b'perfnodelookup', formatteropts)
1717 def perfnodelookup(ui, repo, rev, **opts):
1719 def perfnodelookup(ui, repo, rev, **opts):
1718 opts = _byteskwargs(opts)
1720 opts = _byteskwargs(opts)
1719 timer, fm = gettimer(ui, opts)
1721 timer, fm = gettimer(ui, opts)
1720 import mercurial.revlog
1722 import mercurial.revlog
1721
1723
1722 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1724 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1723 n = scmutil.revsingle(repo, rev).node()
1725 n = scmutil.revsingle(repo, rev).node()
1724 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1726 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1725
1727
1726 def d():
1728 def d():
1727 cl.rev(n)
1729 cl.rev(n)
1728 clearcaches(cl)
1730 clearcaches(cl)
1729
1731
1730 timer(d)
1732 timer(d)
1731 fm.end()
1733 fm.end()
1732
1734
1733
1735
1734 @command(
1736 @command(
1735 b'perflog',
1737 b'perflog',
1736 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1738 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1737 )
1739 )
1738 def perflog(ui, repo, rev=None, **opts):
1740 def perflog(ui, repo, rev=None, **opts):
1739 opts = _byteskwargs(opts)
1741 opts = _byteskwargs(opts)
1740 if rev is None:
1742 if rev is None:
1741 rev = []
1743 rev = []
1742 timer, fm = gettimer(ui, opts)
1744 timer, fm = gettimer(ui, opts)
1743 ui.pushbuffer()
1745 ui.pushbuffer()
1744 timer(
1746 timer(
1745 lambda: commands.log(
1747 lambda: commands.log(
1746 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1748 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1747 )
1749 )
1748 )
1750 )
1749 ui.popbuffer()
1751 ui.popbuffer()
1750 fm.end()
1752 fm.end()
1751
1753
1752
1754
1753 @command(b'perfmoonwalk', formatteropts)
1755 @command(b'perfmoonwalk', formatteropts)
1754 def perfmoonwalk(ui, repo, **opts):
1756 def perfmoonwalk(ui, repo, **opts):
1755 """benchmark walking the changelog backwards
1757 """benchmark walking the changelog backwards
1756
1758
1757 This also loads the changelog data for each revision in the changelog.
1759 This also loads the changelog data for each revision in the changelog.
1758 """
1760 """
1759 opts = _byteskwargs(opts)
1761 opts = _byteskwargs(opts)
1760 timer, fm = gettimer(ui, opts)
1762 timer, fm = gettimer(ui, opts)
1761
1763
1762 def moonwalk():
1764 def moonwalk():
1763 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1765 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1764 ctx = repo[i]
1766 ctx = repo[i]
1765 ctx.branch() # read changelog data (in addition to the index)
1767 ctx.branch() # read changelog data (in addition to the index)
1766
1768
1767 timer(moonwalk)
1769 timer(moonwalk)
1768 fm.end()
1770 fm.end()
1769
1771
1770
1772
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # template support needed here landed in Mercurial 4.3; bail out
    # gracefully on anything older
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a throw-away ui so that the cost of writing the output
    # does not pollute the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()

    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
1810
1812
1811
1813
def _displaystats(ui, opts, entries, data):
    """display percentile statistics for data gathered by a perfhelper* command

    ``entries`` is a list of ``(key, title)`` pairs and ``data`` maps each
    key to a list of tuples whose first item is the measured value (the
    remaining items identify which revisions the value was measured on and
    only matter for breaking sort ties).
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # nothing was gathered for this entry; skip it instead of
            # crashing on ``values[0]`` below
            continue
        # fix: percentile indices must be computed from the number of
        # *values*, not ``len(data)`` (the number of entry keys)
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1857
1859
1858
1860
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the columns that are only filled in when timing
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # fix: use native string keys.  The keys were bytes (b'base',
            # ...) but are read back with native strings below and splatted
            # into fm.data(**data), both of which break on Python 3.
            data = {
                'base': b.hex(),
                'p1.node': p1.hex(),
                'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                'p1.nbmissingfiles': len(p1missing),
                'p2.node': p2.hex(),
                'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # fix: read the clock *before* computing the duration; the
                # previous code reused the stale `end` from the p1 timing.
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not
        # sure how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2040
2042
2041
2043
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # fix: use native string keys.  The keys were bytes
                # (b'source', ...) but are read back with native strings
                # below and splatted into fm.data(**data), both of which
                # break on Python 3.
                data = {
                    'source': base.hex(),
                    'destination': parent.hex(),
                    'nbrevs': len(repo.revs('%d::%d', b, p)),
                    'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not
        # sure how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2167
2169
2168
2170
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark creating a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(makeauditor)
    fm.end()
2175
2177
2176
2178
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    # each run re-reads the fncache file through the private loader
    timer(lambda: store.fncache._load())
    fm.end()
2188
2190
2189
2191
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache within a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # fix: release the repository lock even if loading, the transaction or
    # the timed runs raise; it used to leak on any exception.
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        # the fncache content is restored afterwards, so the benchmark does
        # not permanently modify the repository
        tr.addbackup(b'fncache')

        def d():
            # force a rewrite on every run even though nothing changed
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2208
2210
2209
2211
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once up-front so only the encoding itself is measured
    store.fncache._load()

    def encodeall():
        for entry in store.fncache.entries:
            store.encode(entry)

    timer(encodeall)
    fm.end()
2223
2225
2224
2226
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded variant of `perfbdiff`.
    #
    # Text pairs are pulled from the queue ``q``; a ``None`` item marks the
    # end of one timed batch.  ``xdiff``/``blocks`` select which diff
    # implementation is exercised.  After draining a batch the worker parks
    # on the ``ready`` condition until the driver wakes all workers for the
    # next timing run; setting ``done`` lets the loop exit.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            # wait for the driver's notify_all() before starting a new batch
            ready.wait()
2240
2242
2241
2243
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for manifest node ``mnode``."""
    manifestlog = repo.manifestlog

    # modern Mercurial exposes manifest storage through getstorage();
    # fall back to the private revlog attribute on older versions
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
2251
2253
2252
2254
2253 @command(
2255 @command(
2254 b'perfbdiff',
2256 b'perfbdiff',
2255 revlogopts
2257 revlogopts
2256 + formatteropts
2258 + formatteropts
2257 + [
2259 + [
2258 (
2260 (
2259 b'',
2261 b'',
2260 b'count',
2262 b'count',
2261 1,
2263 1,
2262 b'number of revisions to test (when using --startrev)',
2264 b'number of revisions to test (when using --startrev)',
2263 ),
2265 ),
2264 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2266 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2265 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2267 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2266 (b'', b'blocks', False, b'test computing diffs into blocks'),
2268 (b'', b'blocks', False, b'test computing diffs into blocks'),
2267 (b'', b'xdiff', False, b'use xdiff algorithm'),
2269 (b'', b'xdiff', False, b'use xdiff algorithm'),
2268 ],
2270 ],
2269 b'-c|-m|FILE REV',
2271 b'-c|-m|FILE REV',
2270 )
2272 )
2271 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2273 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2272 """benchmark a bdiff between revisions
2274 """benchmark a bdiff between revisions
2273
2275
2274 By default, benchmark a bdiff between its delta parent and itself.
2276 By default, benchmark a bdiff between its delta parent and itself.
2275
2277
2276 With ``--count``, benchmark bdiffs between delta parents and self for N
2278 With ``--count``, benchmark bdiffs between delta parents and self for N
2277 revisions starting at the specified revision.
2279 revisions starting at the specified revision.
2278
2280
2279 With ``--alldata``, assume the requested revision is a changeset and
2281 With ``--alldata``, assume the requested revision is a changeset and
2280 measure bdiffs for all changes related to that changeset (manifest
2282 measure bdiffs for all changes related to that changeset (manifest
2281 and filelogs).
2283 and filelogs).
2282 """
2284 """
2283 opts = _byteskwargs(opts)
2285 opts = _byteskwargs(opts)
2284
2286
2285 if opts[b'xdiff'] and not opts[b'blocks']:
2287 if opts[b'xdiff'] and not opts[b'blocks']:
2286 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2288 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2287
2289
2288 if opts[b'alldata']:
2290 if opts[b'alldata']:
2289 opts[b'changelog'] = True
2291 opts[b'changelog'] = True
2290
2292
2291 if opts.get(b'changelog') or opts.get(b'manifest'):
2293 if opts.get(b'changelog') or opts.get(b'manifest'):
2292 file_, rev = None, file_
2294 file_, rev = None, file_
2293 elif rev is None:
2295 elif rev is None:
2294 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2296 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2295
2297
2296 blocks = opts[b'blocks']
2298 blocks = opts[b'blocks']
2297 xdiff = opts[b'xdiff']
2299 xdiff = opts[b'xdiff']
2298 textpairs = []
2300 textpairs = []
2299
2301
2300 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2302 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2301
2303
2302 startrev = r.rev(r.lookup(rev))
2304 startrev = r.rev(r.lookup(rev))
2303 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2305 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2304 if opts[b'alldata']:
2306 if opts[b'alldata']:
2305 # Load revisions associated with changeset.
2307 # Load revisions associated with changeset.
2306 ctx = repo[rev]
2308 ctx = repo[rev]
2307 mtext = _manifestrevision(repo, ctx.manifestnode())
2309 mtext = _manifestrevision(repo, ctx.manifestnode())
2308 for pctx in ctx.parents():
2310 for pctx in ctx.parents():
2309 pman = _manifestrevision(repo, pctx.manifestnode())
2311 pman = _manifestrevision(repo, pctx.manifestnode())
2310 textpairs.append((pman, mtext))
2312 textpairs.append((pman, mtext))
2311
2313
2312 # Load filelog revisions by iterating manifest delta.
2314 # Load filelog revisions by iterating manifest delta.
2313 man = ctx.manifest()
2315 man = ctx.manifest()
2314 pman = ctx.p1().manifest()
2316 pman = ctx.p1().manifest()
2315 for filename, change in pman.diff(man).items():
2317 for filename, change in pman.diff(man).items():
2316 fctx = repo.file(filename)
2318 fctx = repo.file(filename)
2317 f1 = fctx.revision(change[0][0] or -1)
2319 f1 = fctx.revision(change[0][0] or -1)
2318 f2 = fctx.revision(change[1][0] or -1)
2320 f2 = fctx.revision(change[1][0] or -1)
2319 textpairs.append((f1, f2))
2321 textpairs.append((f1, f2))
2320 else:
2322 else:
2321 dp = r.deltaparent(rev)
2323 dp = r.deltaparent(rev)
2322 textpairs.append((r.revision(dp), r.revision(rev)))
2324 textpairs.append((r.revision(dp), r.revision(rev)))
2323
2325
2324 withthreads = threads > 0
2326 withthreads = threads > 0
2325 if not withthreads:
2327 if not withthreads:
2326
2328
2327 def d():
2329 def d():
2328 for pair in textpairs:
2330 for pair in textpairs:
2329 if xdiff:
2331 if xdiff:
2330 mdiff.bdiff.xdiffblocks(*pair)
2332 mdiff.bdiff.xdiffblocks(*pair)
2331 elif blocks:
2333 elif blocks:
2332 mdiff.bdiff.blocks(*pair)
2334 mdiff.bdiff.blocks(*pair)
2333 else:
2335 else:
2334 mdiff.textdiff(*pair)
2336 mdiff.textdiff(*pair)
2335
2337
2336 else:
2338 else:
2337 q = queue()
2339 q = queue()
2338 for i in _xrange(threads):
2340 for i in _xrange(threads):
2339 q.put(None)
2341 q.put(None)
2340 ready = threading.Condition()
2342 ready = threading.Condition()
2341 done = threading.Event()
2343 done = threading.Event()
2342 for i in _xrange(threads):
2344 for i in _xrange(threads):
2343 threading.Thread(
2345 threading.Thread(
2344 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2346 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2345 ).start()
2347 ).start()
2346 q.join()
2348 q.join()
2347
2349
2348 def d():
2350 def d():
2349 for pair in textpairs:
2351 for pair in textpairs:
2350 q.put(pair)
2352 q.put(pair)
2351 for i in _xrange(threads):
2353 for i in _xrange(threads):
2352 q.put(None)
2354 q.put(None)
2353 with ready:
2355 with ready:
2354 ready.notify_all()
2356 ready.notify_all()
2355 q.join()
2357 q.join()
2356
2358
2357 timer, fm = gettimer(ui, opts)
2359 timer, fm = gettimer(ui, opts)
2358 timer(d)
2360 timer(d)
2359 fm.end()
2361 fm.end()
2360
2362
2361 if withthreads:
2363 if withthreads:
2362 done.set()
2364 done.set()
2363 for i in _xrange(threads):
2365 for i in _xrange(threads):
2364 q.put(None)
2366 q.put(None)
2365 with ready:
2367 with ready:
2366 ready.notify_all()
2368 ready.notify_all()
2367
2369
2368
2370
2369 @command(
2371 @command(
2370 b'perfunidiff',
2372 b'perfunidiff',
2371 revlogopts
2373 revlogopts
2372 + formatteropts
2374 + formatteropts
2373 + [
2375 + [
2374 (
2376 (
2375 b'',
2377 b'',
2376 b'count',
2378 b'count',
2377 1,
2379 1,
2378 b'number of revisions to test (when using --startrev)',
2380 b'number of revisions to test (when using --startrev)',
2379 ),
2381 ),
2380 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2382 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2381 ],
2383 ],
2382 b'-c|-m|FILE REV',
2384 b'-c|-m|FILE REV',
2383 )
2385 )
2384 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2386 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2385 """benchmark a unified diff between revisions
2387 """benchmark a unified diff between revisions
2386
2388
2387 This doesn't include any copy tracing - it's just a unified diff
2389 This doesn't include any copy tracing - it's just a unified diff
2388 of the texts.
2390 of the texts.
2389
2391
2390 By default, benchmark a diff between its delta parent and itself.
2392 By default, benchmark a diff between its delta parent and itself.
2391
2393
2392 With ``--count``, benchmark diffs between delta parents and self for N
2394 With ``--count``, benchmark diffs between delta parents and self for N
2393 revisions starting at the specified revision.
2395 revisions starting at the specified revision.
2394
2396
2395 With ``--alldata``, assume the requested revision is a changeset and
2397 With ``--alldata``, assume the requested revision is a changeset and
2396 measure diffs for all changes related to that changeset (manifest
2398 measure diffs for all changes related to that changeset (manifest
2397 and filelogs).
2399 and filelogs).
2398 """
2400 """
2399 opts = _byteskwargs(opts)
2401 opts = _byteskwargs(opts)
2400 if opts[b'alldata']:
2402 if opts[b'alldata']:
2401 opts[b'changelog'] = True
2403 opts[b'changelog'] = True
2402
2404
2403 if opts.get(b'changelog') or opts.get(b'manifest'):
2405 if opts.get(b'changelog') or opts.get(b'manifest'):
2404 file_, rev = None, file_
2406 file_, rev = None, file_
2405 elif rev is None:
2407 elif rev is None:
2406 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2408 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2407
2409
2408 textpairs = []
2410 textpairs = []
2409
2411
2410 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2412 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2411
2413
2412 startrev = r.rev(r.lookup(rev))
2414 startrev = r.rev(r.lookup(rev))
2413 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2415 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2414 if opts[b'alldata']:
2416 if opts[b'alldata']:
2415 # Load revisions associated with changeset.
2417 # Load revisions associated with changeset.
2416 ctx = repo[rev]
2418 ctx = repo[rev]
2417 mtext = _manifestrevision(repo, ctx.manifestnode())
2419 mtext = _manifestrevision(repo, ctx.manifestnode())
2418 for pctx in ctx.parents():
2420 for pctx in ctx.parents():
2419 pman = _manifestrevision(repo, pctx.manifestnode())
2421 pman = _manifestrevision(repo, pctx.manifestnode())
2420 textpairs.append((pman, mtext))
2422 textpairs.append((pman, mtext))
2421
2423
2422 # Load filelog revisions by iterating manifest delta.
2424 # Load filelog revisions by iterating manifest delta.
2423 man = ctx.manifest()
2425 man = ctx.manifest()
2424 pman = ctx.p1().manifest()
2426 pman = ctx.p1().manifest()
2425 for filename, change in pman.diff(man).items():
2427 for filename, change in pman.diff(man).items():
2426 fctx = repo.file(filename)
2428 fctx = repo.file(filename)
2427 f1 = fctx.revision(change[0][0] or -1)
2429 f1 = fctx.revision(change[0][0] or -1)
2428 f2 = fctx.revision(change[1][0] or -1)
2430 f2 = fctx.revision(change[1][0] or -1)
2429 textpairs.append((f1, f2))
2431 textpairs.append((f1, f2))
2430 else:
2432 else:
2431 dp = r.deltaparent(rev)
2433 dp = r.deltaparent(rev)
2432 textpairs.append((r.revision(dp), r.revision(rev)))
2434 textpairs.append((r.revision(dp), r.revision(rev)))
2433
2435
2434 def d():
2436 def d():
2435 for left, right in textpairs:
2437 for left, right in textpairs:
2436 # The date strings don't matter, so we pass empty strings.
2438 # The date strings don't matter, so we pass empty strings.
2437 headerlines, hunks = mdiff.unidiff(
2439 headerlines, hunks = mdiff.unidiff(
2438 left, b'', right, b'', b'left', b'right', binary=False
2440 left, b'', right, b'', b'left', b'right', binary=False
2439 )
2441 )
2440 # consume iterators in roughly the way patch.py does
2442 # consume iterators in roughly the way patch.py does
2441 b'\n'.join(headerlines)
2443 b'\n'.join(headerlines)
2442 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2444 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2443
2445
2444 timer, fm = gettimer(ui, opts)
2446 timer, fm = gettimer(ui, opts)
2445 timer(d)
2447 timer(d)
2446 fm.end()
2448 fm.end()
2447
2449
2448
2450
2449 @command(b'perfdiffwd', formatteropts)
2451 @command(b'perfdiffwd', formatteropts)
2450 def perfdiffwd(ui, repo, **opts):
2452 def perfdiffwd(ui, repo, **opts):
2451 """Profile diff of working directory changes"""
2453 """Profile diff of working directory changes"""
2452 opts = _byteskwargs(opts)
2454 opts = _byteskwargs(opts)
2453 timer, fm = gettimer(ui, opts)
2455 timer, fm = gettimer(ui, opts)
2454 options = {
2456 options = {
2455 'w': 'ignore_all_space',
2457 'w': 'ignore_all_space',
2456 'b': 'ignore_space_change',
2458 'b': 'ignore_space_change',
2457 'B': 'ignore_blank_lines',
2459 'B': 'ignore_blank_lines',
2458 }
2460 }
2459
2461
2460 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2462 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2461 opts = dict((options[c], b'1') for c in diffopt)
2463 opts = dict((options[c], b'1') for c in diffopt)
2462
2464
2463 def d():
2465 def d():
2464 ui.pushbuffer()
2466 ui.pushbuffer()
2465 commands.diff(ui, repo, **opts)
2467 commands.diff(ui, repo, **opts)
2466 ui.popbuffer()
2468 ui.popbuffer()
2467
2469
2468 diffopt = diffopt.encode('ascii')
2470 diffopt = diffopt.encode('ascii')
2469 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2471 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2470 timer(d, title=title)
2472 timer(d, title=title)
2471 fm.end()
2473 fm.end()
2472
2474
2473
2475
2474 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2476 @command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2475 def perfrevlogindex(ui, repo, file_=None, **opts):
2477 def perfrevlogindex(ui, repo, file_=None, **opts):
2476 """Benchmark operations against a revlog index.
2478 """Benchmark operations against a revlog index.
2477
2479
2478 This tests constructing a revlog instance, reading index data,
2480 This tests constructing a revlog instance, reading index data,
2479 parsing index data, and performing various operations related to
2481 parsing index data, and performing various operations related to
2480 index data.
2482 index data.
2481 """
2483 """
2482
2484
2483 opts = _byteskwargs(opts)
2485 opts = _byteskwargs(opts)
2484
2486
2485 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2487 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2486
2488
2487 opener = getattr(rl, 'opener') # trick linter
2489 opener = getattr(rl, 'opener') # trick linter
2488 indexfile = rl.indexfile
2490 indexfile = rl.indexfile
2489 data = opener.read(indexfile)
2491 data = opener.read(indexfile)
2490
2492
2491 header = struct.unpack(b'>I', data[0:4])[0]
2493 header = struct.unpack(b'>I', data[0:4])[0]
2492 version = header & 0xFFFF
2494 version = header & 0xFFFF
2493 if version == 1:
2495 if version == 1:
2494 revlogio = revlog.revlogio()
2496 revlogio = revlog.revlogio()
2495 inline = header & (1 << 16)
2497 inline = header & (1 << 16)
2496 else:
2498 else:
2497 raise error.Abort(b'unsupported revlog version: %d' % version)
2499 raise error.Abort(b'unsupported revlog version: %d' % version)
2498
2500
2499 rllen = len(rl)
2501 rllen = len(rl)
2500
2502
2501 node0 = rl.node(0)
2503 node0 = rl.node(0)
2502 node25 = rl.node(rllen // 4)
2504 node25 = rl.node(rllen // 4)
2503 node50 = rl.node(rllen // 2)
2505 node50 = rl.node(rllen // 2)
2504 node75 = rl.node(rllen // 4 * 3)
2506 node75 = rl.node(rllen // 4 * 3)
2505 node100 = rl.node(rllen - 1)
2507 node100 = rl.node(rllen - 1)
2506
2508
2507 allrevs = range(rllen)
2509 allrevs = range(rllen)
2508 allrevsrev = list(reversed(allrevs))
2510 allrevsrev = list(reversed(allrevs))
2509 allnodes = [rl.node(rev) for rev in range(rllen)]
2511 allnodes = [rl.node(rev) for rev in range(rllen)]
2510 allnodesrev = list(reversed(allnodes))
2512 allnodesrev = list(reversed(allnodes))
2511
2513
2512 def constructor():
2514 def constructor():
2513 revlog.revlog(opener, indexfile)
2515 revlog.revlog(opener, indexfile)
2514
2516
2515 def read():
2517 def read():
2516 with opener(indexfile) as fh:
2518 with opener(indexfile) as fh:
2517 fh.read()
2519 fh.read()
2518
2520
2519 def parseindex():
2521 def parseindex():
2520 revlogio.parseindex(data, inline)
2522 revlogio.parseindex(data, inline)
2521
2523
2522 def getentry(revornode):
2524 def getentry(revornode):
2523 index = revlogio.parseindex(data, inline)[0]
2525 index = revlogio.parseindex(data, inline)[0]
2524 index[revornode]
2526 index[revornode]
2525
2527
2526 def getentries(revs, count=1):
2528 def getentries(revs, count=1):
2527 index = revlogio.parseindex(data, inline)[0]
2529 index = revlogio.parseindex(data, inline)[0]
2528
2530
2529 for i in range(count):
2531 for i in range(count):
2530 for rev in revs:
2532 for rev in revs:
2531 index[rev]
2533 index[rev]
2532
2534
2533 def resolvenode(node):
2535 def resolvenode(node):
2534 nodemap = revlogio.parseindex(data, inline)[1]
2536 nodemap = revlogio.parseindex(data, inline)[1]
2535 # This only works for the C code.
2537 # This only works for the C code.
2536 if nodemap is None:
2538 if nodemap is None:
2537 return
2539 return
2538
2540
2539 try:
2541 try:
2540 nodemap[node]
2542 nodemap[node]
2541 except error.RevlogError:
2543 except error.RevlogError:
2542 pass
2544 pass
2543
2545
2544 def resolvenodes(nodes, count=1):
2546 def resolvenodes(nodes, count=1):
2545 nodemap = revlogio.parseindex(data, inline)[1]
2547 nodemap = revlogio.parseindex(data, inline)[1]
2546 if nodemap is None:
2548 if nodemap is None:
2547 return
2549 return
2548
2550
2549 for i in range(count):
2551 for i in range(count):
2550 for node in nodes:
2552 for node in nodes:
2551 try:
2553 try:
2552 nodemap[node]
2554 nodemap[node]
2553 except error.RevlogError:
2555 except error.RevlogError:
2554 pass
2556 pass
2555
2557
2556 benches = [
2558 benches = [
2557 (constructor, b'revlog constructor'),
2559 (constructor, b'revlog constructor'),
2558 (read, b'read'),
2560 (read, b'read'),
2559 (parseindex, b'create index object'),
2561 (parseindex, b'create index object'),
2560 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2562 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2561 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2563 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2562 (lambda: resolvenode(node0), b'look up node at rev 0'),
2564 (lambda: resolvenode(node0), b'look up node at rev 0'),
2563 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2565 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2564 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2566 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2565 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2567 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2566 (lambda: resolvenode(node100), b'look up node at tip'),
2568 (lambda: resolvenode(node100), b'look up node at tip'),
2567 # 2x variation is to measure caching impact.
2569 # 2x variation is to measure caching impact.
2568 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2570 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2569 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2571 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2570 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2572 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2571 (
2573 (
2572 lambda: resolvenodes(allnodesrev, 2),
2574 lambda: resolvenodes(allnodesrev, 2),
2573 b'look up all nodes 2x (reverse)',
2575 b'look up all nodes 2x (reverse)',
2574 ),
2576 ),
2575 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2577 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2576 (
2578 (
2577 lambda: getentries(allrevs, 2),
2579 lambda: getentries(allrevs, 2),
2578 b'retrieve all index entries 2x (forward)',
2580 b'retrieve all index entries 2x (forward)',
2579 ),
2581 ),
2580 (
2582 (
2581 lambda: getentries(allrevsrev),
2583 lambda: getentries(allrevsrev),
2582 b'retrieve all index entries (reverse)',
2584 b'retrieve all index entries (reverse)',
2583 ),
2585 ),
2584 (
2586 (
2585 lambda: getentries(allrevsrev, 2),
2587 lambda: getentries(allrevsrev, 2),
2586 b'retrieve all index entries 2x (reverse)',
2588 b'retrieve all index entries 2x (reverse)',
2587 ),
2589 ),
2588 ]
2590 ]
2589
2591
2590 for fn, title in benches:
2592 for fn, title in benches:
2591 timer, fm = gettimer(ui, opts)
2593 timer, fm = gettimer(ui, opts)
2592 timer(fn, title=title)
2594 timer(fn, title=title)
2593 fm.end()
2595 fm.end()
2594
2596
2595
2597
2596 @command(
2598 @command(
2597 b'perfrevlogrevisions',
2599 b'perfrevlogrevisions',
2598 revlogopts
2600 revlogopts
2599 + formatteropts
2601 + formatteropts
2600 + [
2602 + [
2601 (b'd', b'dist', 100, b'distance between the revisions'),
2603 (b'd', b'dist', 100, b'distance between the revisions'),
2602 (b's', b'startrev', 0, b'revision to start reading at'),
2604 (b's', b'startrev', 0, b'revision to start reading at'),
2603 (b'', b'reverse', False, b'read in reverse'),
2605 (b'', b'reverse', False, b'read in reverse'),
2604 ],
2606 ],
2605 b'-c|-m|FILE',
2607 b'-c|-m|FILE',
2606 )
2608 )
2607 def perfrevlogrevisions(
2609 def perfrevlogrevisions(
2608 ui, repo, file_=None, startrev=0, reverse=False, **opts
2610 ui, repo, file_=None, startrev=0, reverse=False, **opts
2609 ):
2611 ):
2610 """Benchmark reading a series of revisions from a revlog.
2612 """Benchmark reading a series of revisions from a revlog.
2611
2613
2612 By default, we read every ``-d/--dist`` revision from 0 to tip of
2614 By default, we read every ``-d/--dist`` revision from 0 to tip of
2613 the specified revlog.
2615 the specified revlog.
2614
2616
2615 The start revision can be defined via ``-s/--startrev``.
2617 The start revision can be defined via ``-s/--startrev``.
2616 """
2618 """
2617 opts = _byteskwargs(opts)
2619 opts = _byteskwargs(opts)
2618
2620
2619 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2621 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2620 rllen = getlen(ui)(rl)
2622 rllen = getlen(ui)(rl)
2621
2623
2622 if startrev < 0:
2624 if startrev < 0:
2623 startrev = rllen + startrev
2625 startrev = rllen + startrev
2624
2626
2625 def d():
2627 def d():
2626 rl.clearcaches()
2628 rl.clearcaches()
2627
2629
2628 beginrev = startrev
2630 beginrev = startrev
2629 endrev = rllen
2631 endrev = rllen
2630 dist = opts[b'dist']
2632 dist = opts[b'dist']
2631
2633
2632 if reverse:
2634 if reverse:
2633 beginrev, endrev = endrev - 1, beginrev - 1
2635 beginrev, endrev = endrev - 1, beginrev - 1
2634 dist = -1 * dist
2636 dist = -1 * dist
2635
2637
2636 for x in _xrange(beginrev, endrev, dist):
2638 for x in _xrange(beginrev, endrev, dist):
2637 # Old revisions don't support passing int.
2639 # Old revisions don't support passing int.
2638 n = rl.node(x)
2640 n = rl.node(x)
2639 rl.revision(n)
2641 rl.revision(n)
2640
2642
2641 timer, fm = gettimer(ui, opts)
2643 timer, fm = gettimer(ui, opts)
2642 timer(d)
2644 timer(d)
2643 fm.end()
2645 fm.end()
2644
2646
2645
2647
2646 @command(
2648 @command(
2647 b'perfrevlogwrite',
2649 b'perfrevlogwrite',
2648 revlogopts
2650 revlogopts
2649 + formatteropts
2651 + formatteropts
2650 + [
2652 + [
2651 (b's', b'startrev', 1000, b'revision to start writing at'),
2653 (b's', b'startrev', 1000, b'revision to start writing at'),
2652 (b'', b'stoprev', -1, b'last revision to write'),
2654 (b'', b'stoprev', -1, b'last revision to write'),
2653 (b'', b'count', 3, b'number of passes to perform'),
2655 (b'', b'count', 3, b'number of passes to perform'),
2654 (b'', b'details', False, b'print timing for every revisions tested'),
2656 (b'', b'details', False, b'print timing for every revisions tested'),
2655 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2657 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2656 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2658 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2657 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2659 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2658 ],
2660 ],
2659 b'-c|-m|FILE',
2661 b'-c|-m|FILE',
2660 )
2662 )
2661 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2663 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2662 """Benchmark writing a series of revisions to a revlog.
2664 """Benchmark writing a series of revisions to a revlog.
2663
2665
2664 Possible source values are:
2666 Possible source values are:
2665 * `full`: add from a full text (default).
2667 * `full`: add from a full text (default).
2666 * `parent-1`: add from a delta to the first parent
2668 * `parent-1`: add from a delta to the first parent
2667 * `parent-2`: add from a delta to the second parent if it exists
2669 * `parent-2`: add from a delta to the second parent if it exists
2668 (use a delta from the first parent otherwise)
2670 (use a delta from the first parent otherwise)
2669 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2671 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2670 * `storage`: add from the existing precomputed deltas
2672 * `storage`: add from the existing precomputed deltas
2671
2673
2672 Note: This performance command measures performance in a custom way. As a
2674 Note: This performance command measures performance in a custom way. As a
2673 result some of the global configuration of the 'perf' command does not
2675 result some of the global configuration of the 'perf' command does not
2674 apply to it:
2676 apply to it:
2675
2677
2676 * ``pre-run``: disabled
2678 * ``pre-run``: disabled
2677
2679
2678 * ``profile-benchmark``: disabled
2680 * ``profile-benchmark``: disabled
2679
2681
2680 * ``run-limits``: disabled use --count instead
2682 * ``run-limits``: disabled use --count instead
2681 """
2683 """
2682 opts = _byteskwargs(opts)
2684 opts = _byteskwargs(opts)
2683
2685
2684 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2686 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2685 rllen = getlen(ui)(rl)
2687 rllen = getlen(ui)(rl)
2686 if startrev < 0:
2688 if startrev < 0:
2687 startrev = rllen + startrev
2689 startrev = rllen + startrev
2688 if stoprev < 0:
2690 if stoprev < 0:
2689 stoprev = rllen + stoprev
2691 stoprev = rllen + stoprev
2690
2692
2691 lazydeltabase = opts['lazydeltabase']
2693 lazydeltabase = opts['lazydeltabase']
2692 source = opts['source']
2694 source = opts['source']
2693 clearcaches = opts['clear_caches']
2695 clearcaches = opts['clear_caches']
2694 validsource = (
2696 validsource = (
2695 b'full',
2697 b'full',
2696 b'parent-1',
2698 b'parent-1',
2697 b'parent-2',
2699 b'parent-2',
2698 b'parent-smallest',
2700 b'parent-smallest',
2699 b'storage',
2701 b'storage',
2700 )
2702 )
2701 if source not in validsource:
2703 if source not in validsource:
2702 raise error.Abort('invalid source type: %s' % source)
2704 raise error.Abort('invalid source type: %s' % source)
2703
2705
2704 ### actually gather results
2706 ### actually gather results
2705 count = opts['count']
2707 count = opts['count']
2706 if count <= 0:
2708 if count <= 0:
2707 raise error.Abort('invalide run count: %d' % count)
2709 raise error.Abort('invalide run count: %d' % count)
2708 allresults = []
2710 allresults = []
2709 for c in range(count):
2711 for c in range(count):
2710 timing = _timeonewrite(
2712 timing = _timeonewrite(
2711 ui,
2713 ui,
2712 rl,
2714 rl,
2713 source,
2715 source,
2714 startrev,
2716 startrev,
2715 stoprev,
2717 stoprev,
2716 c + 1,
2718 c + 1,
2717 lazydeltabase=lazydeltabase,
2719 lazydeltabase=lazydeltabase,
2718 clearcaches=clearcaches,
2720 clearcaches=clearcaches,
2719 )
2721 )
2720 allresults.append(timing)
2722 allresults.append(timing)
2721
2723
2722 ### consolidate the results in a single list
2724 ### consolidate the results in a single list
2723 results = []
2725 results = []
2724 for idx, (rev, t) in enumerate(allresults[0]):
2726 for idx, (rev, t) in enumerate(allresults[0]):
2725 ts = [t]
2727 ts = [t]
2726 for other in allresults[1:]:
2728 for other in allresults[1:]:
2727 orev, ot = other[idx]
2729 orev, ot = other[idx]
2728 assert orev == rev
2730 assert orev == rev
2729 ts.append(ot)
2731 ts.append(ot)
2730 results.append((rev, ts))
2732 results.append((rev, ts))
2731 resultcount = len(results)
2733 resultcount = len(results)
2732
2734
2733 ### Compute and display relevant statistics
2735 ### Compute and display relevant statistics
2734
2736
2735 # get a formatter
2737 # get a formatter
2736 fm = ui.formatter(b'perf', opts)
2738 fm = ui.formatter(b'perf', opts)
2737 displayall = ui.configbool(b"perf", b"all-timing", False)
2739 displayall = ui.configbool(b"perf", b"all-timing", False)
2738
2740
2739 # print individual details if requested
2741 # print individual details if requested
2740 if opts['details']:
2742 if opts['details']:
2741 for idx, item in enumerate(results, 1):
2743 for idx, item in enumerate(results, 1):
2742 rev, data = item
2744 rev, data = item
2743 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2745 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2744 formatone(fm, data, title=title, displayall=displayall)
2746 formatone(fm, data, title=title, displayall=displayall)
2745
2747
2746 # sorts results by median time
2748 # sorts results by median time
2747 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2749 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2748 # list of (name, index) to display)
2750 # list of (name, index) to display)
2749 relevants = [
2751 relevants = [
2750 ("min", 0),
2752 ("min", 0),
2751 ("10%", resultcount * 10 // 100),
2753 ("10%", resultcount * 10 // 100),
2752 ("25%", resultcount * 25 // 100),
2754 ("25%", resultcount * 25 // 100),
2753 ("50%", resultcount * 70 // 100),
2755 ("50%", resultcount * 70 // 100),
2754 ("75%", resultcount * 75 // 100),
2756 ("75%", resultcount * 75 // 100),
2755 ("90%", resultcount * 90 // 100),
2757 ("90%", resultcount * 90 // 100),
2756 ("95%", resultcount * 95 // 100),
2758 ("95%", resultcount * 95 // 100),
2757 ("99%", resultcount * 99 // 100),
2759 ("99%", resultcount * 99 // 100),
2758 ("99.9%", resultcount * 999 // 1000),
2760 ("99.9%", resultcount * 999 // 1000),
2759 ("99.99%", resultcount * 9999 // 10000),
2761 ("99.99%", resultcount * 9999 // 10000),
2760 ("99.999%", resultcount * 99999 // 100000),
2762 ("99.999%", resultcount * 99999 // 100000),
2761 ("max", -1),
2763 ("max", -1),
2762 ]
2764 ]
2763 if not ui.quiet:
2765 if not ui.quiet:
2764 for name, idx in relevants:
2766 for name, idx in relevants:
2765 data = results[idx]
2767 data = results[idx]
2766 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2768 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2767 formatone(fm, data[1], title=title, displayall=displayall)
2769 formatone(fm, data[1], title=title, displayall=displayall)
2768
2770
2769 # XXX summing that many float will not be very precise, we ignore this fact
2771 # XXX summing that many float will not be very precise, we ignore this fact
2770 # for now
2772 # for now
2771 totaltime = []
2773 totaltime = []
2772 for item in allresults:
2774 for item in allresults:
2773 totaltime.append(
2775 totaltime.append(
2774 (
2776 (
2775 sum(x[1][0] for x in item),
2777 sum(x[1][0] for x in item),
2776 sum(x[1][1] for x in item),
2778 sum(x[1][1] for x in item),
2777 sum(x[1][2] for x in item),
2779 sum(x[1][2] for x in item),
2778 )
2780 )
2779 )
2781 )
2780 formatone(
2782 formatone(
2781 fm,
2783 fm,
2782 totaltime,
2784 totaltime,
2783 title="total time (%d revs)" % resultcount,
2785 title="total time (%d revs)" % resultcount,
2784 displayall=displayall,
2786 displayall=displayall,
2785 )
2787 )
2786 fm.end()
2788 fm.end()
2787
2789
2788
2790
2789 class _faketr(object):
2791 class _faketr(object):
2790 def add(s, x, y, z=None):
2792 def add(s, x, y, z=None):
2791 return None
2793 return None
2792
2794
2793
2795
2794 def _timeonewrite(
2796 def _timeonewrite(
2795 ui,
2797 ui,
2796 orig,
2798 orig,
2797 source,
2799 source,
2798 startrev,
2800 startrev,
2799 stoprev,
2801 stoprev,
2800 runidx=None,
2802 runidx=None,
2801 lazydeltabase=True,
2803 lazydeltabase=True,
2802 clearcaches=True,
2804 clearcaches=True,
2803 ):
2805 ):
2804 timings = []
2806 timings = []
2805 tr = _faketr()
2807 tr = _faketr()
2806 with _temprevlog(ui, orig, startrev) as dest:
2808 with _temprevlog(ui, orig, startrev) as dest:
2807 dest._lazydeltabase = lazydeltabase
2809 dest._lazydeltabase = lazydeltabase
2808 revs = list(orig.revs(startrev, stoprev))
2810 revs = list(orig.revs(startrev, stoprev))
2809 total = len(revs)
2811 total = len(revs)
2810 topic = 'adding'
2812 topic = 'adding'
2811 if runidx is not None:
2813 if runidx is not None:
2812 topic += ' (run #%d)' % runidx
2814 topic += ' (run #%d)' % runidx
2813 # Support both old and new progress API
2815 # Support both old and new progress API
2814 if util.safehasattr(ui, 'makeprogress'):
2816 if util.safehasattr(ui, 'makeprogress'):
2815 progress = ui.makeprogress(topic, unit='revs', total=total)
2817 progress = ui.makeprogress(topic, unit='revs', total=total)
2816
2818
2817 def updateprogress(pos):
2819 def updateprogress(pos):
2818 progress.update(pos)
2820 progress.update(pos)
2819
2821
2820 def completeprogress():
2822 def completeprogress():
2821 progress.complete()
2823 progress.complete()
2822
2824
2823 else:
2825 else:
2824
2826
2825 def updateprogress(pos):
2827 def updateprogress(pos):
2826 ui.progress(topic, pos, unit='revs', total=total)
2828 ui.progress(topic, pos, unit='revs', total=total)
2827
2829
2828 def completeprogress():
2830 def completeprogress():
2829 ui.progress(topic, None, unit='revs', total=total)
2831 ui.progress(topic, None, unit='revs', total=total)
2830
2832
2831 for idx, rev in enumerate(revs):
2833 for idx, rev in enumerate(revs):
2832 updateprogress(idx)
2834 updateprogress(idx)
2833 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2835 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2834 if clearcaches:
2836 if clearcaches:
2835 dest.index.clearcaches()
2837 dest.index.clearcaches()
2836 dest.clearcaches()
2838 dest.clearcaches()
2837 with timeone() as r:
2839 with timeone() as r:
2838 dest.addrawrevision(*addargs, **addkwargs)
2840 dest.addrawrevision(*addargs, **addkwargs)
2839 timings.append((rev, r[0]))
2841 timings.append((rev, r[0]))
2840 updateprogress(total)
2842 updateprogress(total)
2841 completeprogress()
2843 completeprogress()
2842 return timings
2844 return timings
2843
2845
2844
2846
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``addrawrevision`` arguments needed to re-add ``rev``.

    ``source`` selects where the revision payload comes from:

    - ``full``: the fulltext of the revision,
    - ``parent-1``: a delta against the first parent,
    - ``parent-2``: a delta against the second parent (first parent when
      there is no second parent),
    - ``parent-smallest``: a delta against whichever parent yields the
      smaller diff,
    - ``storage``: a delta against the revision's on-disk delta parent.

    Returns a ``(positional-args, keyword-args)`` pair suitable for
    ``dest.addrawrevision(*args, **kwargs)``.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)

    text = None
    cachedelta = None
    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2885
2887
2886
2888
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a temporary copy of revlog ``orig``.

    The copy is truncated so that all revisions ``>= truncaterev`` are
    removed, letting callers benchmark re-adding them. The temporary
    directory is deleted on exit. Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # forward `upperboundcomp` when the running version of revlog has it
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # the index is a fixed-size record per revision, so the byte
        # offset of `truncaterev` is `truncaterev * record-size`
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2937
2939
2938
2940
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit selection: use every available engine that can
        # actually compress revlog data
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the revlog's backing file
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # read every chunk, one segment request per revision
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing one file descriptor
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # read every chunk with a single segment request
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        # batched read, reusing one file descriptor
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress each chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        # read + decompress all chunks in one call
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks captured by dochunkbatch
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3066
3068
3067
3069
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each on-disk segment in `data` into the per-revision
        # raw (still compressed) chunks of the delta chain
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        # step 1: delta chain computation
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # step 3 (I/O only): read the raw segments
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # step 2: chain slicing (sparse-read only)
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        # step 3 (in-memory): split segments into per-rev chunks
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        # step 4: decompress each raw chunk
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        # step 5: apply the binary deltas
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        # step 6: verify the fulltext hash
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        # all steps combined
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each benchmark closure needs
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3209
3211
3210
3212
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold filtering and obsolescence related data."""
    # NOTE: the help text previously referenced a nonexistent --clean
    # option; the registered flag is -C/--clear.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a full changectx per matched revision
            for ctx in repo.set(expr):
                pass
        else:
            # only iterate the revision numbers
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3242
3244
3243
3245
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makeobsbench(name):
        # build a benchmark closure for one obsolescence-related set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]
    for name in obsnames:
        timer(makeobsbench(name), title=name)

    def makefilterbench(name):
        # build a benchmark closure for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]
    for name in filternames:
        timer(makefilterbench(name), title=name)
    fm.end()
3289
3291
3290
3292
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the subset build time
                # is included in the measurement
                view._branchcaches.clear()
            else:
                # only drop the cache for this filter level
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # topologically order filters so each one's subset is benchmarked
    # (and therefore cached) before it
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so only the in-memory update
    # is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # always restore the patched read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3380
3382
3381
3383
3382 @command(
3384 @command(
3383 b'perfbranchmapupdate',
3385 b'perfbranchmapupdate',
3384 [
3386 [
3385 (b'', b'base', [], b'subset of revision to start from'),
3387 (b'', b'base', [], b'subset of revision to start from'),
3386 (b'', b'target', [], b'subset of revision to end with'),
3388 (b'', b'target', [], b'subset of revision to end with'),
3387 (b'', b'clear-caches', False, b'clear cache between each runs'),
3389 (b'', b'clear-caches', False, b'clear cache between each runs'),
3388 ]
3390 ]
3389 + formatteropts,
3391 + formatteropts,
3390 )
3392 )
3391 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3393 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3392 """benchmark branchmap update from for <base> revs to <target> revs
3394 """benchmark branchmap update from for <base> revs to <target> revs
3393
3395
3394 If `--clear-caches` is passed, the following items will be reset before
3396 If `--clear-caches` is passed, the following items will be reset before
3395 each update:
3397 each update:
3396 * the changelog instance and associated indexes
3398 * the changelog instance and associated indexes
3397 * the rev-branch-cache instance
3399 * the rev-branch-cache instance
3398
3400
3399 Examples:
3401 Examples:
3400
3402
3401 # update for the one last revision
3403 # update for the one last revision
3402 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3404 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3403
3405
3404 $ update for change coming with a new branch
3406 $ update for change coming with a new branch
3405 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3407 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3406 """
3408 """
3407 from mercurial import branchmap
3409 from mercurial import branchmap
3408 from mercurial import repoview
3410 from mercurial import repoview
3409
3411
3410 opts = _byteskwargs(opts)
3412 opts = _byteskwargs(opts)
3411 timer, fm = gettimer(ui, opts)
3413 timer, fm = gettimer(ui, opts)
3412 clearcaches = opts[b'clear_caches']
3414 clearcaches = opts[b'clear_caches']
3413 unfi = repo.unfiltered()
3415 unfi = repo.unfiltered()
3414 x = [None] # used to pass data between closure
3416 x = [None] # used to pass data between closure
3415
3417
3416 # we use a `list` here to avoid possible side effect from smartset
3418 # we use a `list` here to avoid possible side effect from smartset
3417 baserevs = list(scmutil.revrange(repo, base))
3419 baserevs = list(scmutil.revrange(repo, base))
3418 targetrevs = list(scmutil.revrange(repo, target))
3420 targetrevs = list(scmutil.revrange(repo, target))
3419 if not baserevs:
3421 if not baserevs:
3420 raise error.Abort(b'no revisions selected for --base')
3422 raise error.Abort(b'no revisions selected for --base')
3421 if not targetrevs:
3423 if not targetrevs:
3422 raise error.Abort(b'no revisions selected for --target')
3424 raise error.Abort(b'no revisions selected for --target')
3423
3425
3424 # make sure the target branchmap also contains the one in the base
3426 # make sure the target branchmap also contains the one in the base
3425 targetrevs = list(set(baserevs) | set(targetrevs))
3427 targetrevs = list(set(baserevs) | set(targetrevs))
3426 targetrevs.sort()
3428 targetrevs.sort()
3427
3429
3428 cl = repo.changelog
3430 cl = repo.changelog
3429 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3431 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3430 allbaserevs.sort()
3432 allbaserevs.sort()
3431 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3433 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3432
3434
3433 newrevs = list(alltargetrevs.difference(allbaserevs))
3435 newrevs = list(alltargetrevs.difference(allbaserevs))
3434 newrevs.sort()
3436 newrevs.sort()
3435
3437
3436 allrevs = frozenset(unfi.changelog.revs())
3438 allrevs = frozenset(unfi.changelog.revs())
3437 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3439 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3438 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3440 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3439
3441
3440 def basefilter(repo, visibilityexceptions=None):
3442 def basefilter(repo, visibilityexceptions=None):
3441 return basefilterrevs
3443 return basefilterrevs
3442
3444
3443 def targetfilter(repo, visibilityexceptions=None):
3445 def targetfilter(repo, visibilityexceptions=None):
3444 return targetfilterrevs
3446 return targetfilterrevs
3445
3447
3446 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3448 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3447 ui.status(msg % (len(allbaserevs), len(newrevs)))
3449 ui.status(msg % (len(allbaserevs), len(newrevs)))
3448 if targetfilterrevs:
3450 if targetfilterrevs:
3449 msg = b'(%d revisions still filtered)\n'
3451 msg = b'(%d revisions still filtered)\n'
3450 ui.status(msg % len(targetfilterrevs))
3452 ui.status(msg % len(targetfilterrevs))
3451
3453
3452 try:
3454 try:
3453 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3455 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3454 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3456 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3455
3457
3456 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3458 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3457 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3459 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3458
3460
3459 # try to find an existing branchmap to reuse
3461 # try to find an existing branchmap to reuse
3460 subsettable = getbranchmapsubsettable()
3462 subsettable = getbranchmapsubsettable()
3461 candidatefilter = subsettable.get(None)
3463 candidatefilter = subsettable.get(None)
3462 while candidatefilter is not None:
3464 while candidatefilter is not None:
3463 candidatebm = repo.filtered(candidatefilter).branchmap()
3465 candidatebm = repo.filtered(candidatefilter).branchmap()
3464 if candidatebm.validfor(baserepo):
3466 if candidatebm.validfor(baserepo):
3465 filtered = repoview.filterrevs(repo, candidatefilter)
3467 filtered = repoview.filterrevs(repo, candidatefilter)
3466 missing = [r for r in allbaserevs if r in filtered]
3468 missing = [r for r in allbaserevs if r in filtered]
3467 base = candidatebm.copy()
3469 base = candidatebm.copy()
3468 base.update(baserepo, missing)
3470 base.update(baserepo, missing)
3469 break
3471 break
3470 candidatefilter = subsettable.get(candidatefilter)
3472 candidatefilter = subsettable.get(candidatefilter)
3471 else:
3473 else:
3472 # no suitable subset where found
3474 # no suitable subset where found
3473 base = branchmap.branchcache()
3475 base = branchmap.branchcache()
3474 base.update(baserepo, allbaserevs)
3476 base.update(baserepo, allbaserevs)
3475
3477
3476 def setup():
3478 def setup():
3477 x[0] = base.copy()
3479 x[0] = base.copy()
3478 if clearcaches:
3480 if clearcaches:
3479 unfi._revbranchcache = None
3481 unfi._revbranchcache = None
3480 clearchangelog(repo)
3482 clearchangelog(repo)
3481
3483
3482 def bench():
3484 def bench():
3483 x[0].update(targetrepo, newrevs)
3485 x[0].update(targetrepo, newrevs)
3484
3486
3485 timer(bench, setup=setup)
3487 timer(bench, setup=setup)
3486 fm.end()
3488 fm.end()
3487 finally:
3489 finally:
3488 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3490 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3489 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3491 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3490
3492
3491
3493
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fixed typo in help text: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, the on-disk branchmap cache files are listed with their
    sizes instead of running a benchmark.  With --filter, the branchmap of
    the given repoview filter is read; otherwise the unfiltered repository
    is used.  If no cache exists for the requested filter, the nearest
    cached subset (per the branchmap subset table) is used, and we abort
    when none is found.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # NOTE: `list` and `filter` shadow builtins, but renaming them would
        # change the command's keyword interface, so they are kept as-is.
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    # for "historical portability": newer versions expose
    # branchcache.fromfile, older ones only branchmap.read
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        # optionally drop the changelog cache so each run re-reads from disk
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3550
3552
3551
3553
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def countmarkers():
        # parsing happens inside the obsstore constructor; len() forces it
        # and doubles as the reported result
        return len(obsolete.obsstore(svfs))

    timer(countmarkers)
    fm.end()
3561
3563
3562
3564
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    # Benchmark util.lrucachedict: construction, pure gets, inserts and
    # sets (with eviction), and a randomized mix of gets/sets.  When
    # --costlimit is nonzero the cost-aware insert/get code paths are
    # exercised instead of the plain ones.
    opts = _byteskwargs(opts)

    def doinit():
        # construction cost only; the loop amplifies the tiny per-call time
        for i in _xrange(10000):
            util.lrucachedict(size)

    # candidate per-item costs for the cost-aware benchmarks
    costrange = list(range(mincost, maxcost + 1))

    # `size` distinct random keys; exactly filling the cache means the
    # "gets" benchmark below never triggers eviction
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # the cost limit may have evicted the key; misses are expected
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set
        # keys span [0, size * 2] so roughly half the gets miss
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain benchmarks are mutually exclusive variants
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    # each benchmark gets its own timer/formatter pair so results are
    # reported separately under their own title
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3717
3719
3718
3720
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def write():
        # 100k writes of the same constant line; the per-call overhead of
        # ui.writenoi18n is what we are measuring
        msg = b'Testing write performance\n'
        for _ in range(100000):
            ui.writenoi18n(msg)

    timer(write)
    fm.end()
3733
3735
3734
3736
def uisetup(ui):
    """Extension setup hook: compatibility shim for very old Mercurial.

    On versions that have cmdutil.openrevlog but not
    commands.debugrevlogopts (roughly 1.9 through 3.7 per the historical
    notes below), wrap openrevlog so the unsupported ``--dir`` option
    aborts with a clear message instead of failing obscurely.
    """
    wants_wrap = util.safehasattr(
        cmdutil, b'openrevlog'
    ) and not util.safehasattr(commands, b'debugrevlogopts')
    if not wants_wrap:
        return

    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3753
3755
3754
3756
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive a full progress bar from 0 to `total`, one increment per
        # step; the context manager handles setup/teardown of the bar
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now