##// END OF EJS Templates
perf: use `setup` function in `perfdirstatedirs`...
marmoute -
r43394:100e7e0c default
parent child Browse files
Show More
@@ -1,3760 +1,3762 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return the argument unchanged.

    Used as a no-op stand-in for pycompat conversion helpers
    (e.g. ``_byteskwargs``, ``fsencode``) on Mercurial versions that do
    not provide them.
    """
    result = a
    return result
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (given as bytes).

    Implemented with a sentinel default so a missing attribute is
    detected without raising; replaces util.safehasattr, which is only
    available since 1.9.3 (or 94b200a11cf7).
    """
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel
179
179
180
180
181 setattr(util, 'safehasattr', safehasattr)
181 setattr(util, 'safehasattr', safehasattr)
182
182
183 # for "historical portability":
183 # for "historical portability":
184 # define util.timer forcibly, because util.timer has been available
184 # define util.timer forcibly, because util.timer has been available
185 # since ae5d60bb70c9
185 # since ae5d60bb70c9
# pick the best clock available: time.perf_counter (py3.3+) is the
# documented high-resolution choice; otherwise fall back to the
# historical per-platform defaults.
# NOTE(review): b'nt' never compares equal to py3's str os.name, but on
# py3 the perf_counter branch always wins, so this only matters on py2.
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time
192
192
193 # for "historical portability":
193 # for "historical portability":
194 # use locally defined empty option list, if formatteropts isn't
194 # use locally defined empty option list, if formatteropts isn't
195 # available, because commands.formatteropts has been available since
195 # available, because commands.formatteropts has been available since
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 # available since 2.2 (or ae5f92e154d3)
197 # available since 2.2 (or ae5f92e154d3)
198 formatteropts = getattr(
198 formatteropts = getattr(
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 )
200 )
201
201
202 # for "historical portability":
202 # for "historical portability":
203 # use locally defined option list, if debugrevlogopts isn't available,
203 # use locally defined option list, if debugrevlogopts isn't available,
204 # because commands.debugrevlogopts has been available since 3.7 (or
204 # because commands.debugrevlogopts has been available since 3.7 (or
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 # since 1.9 (or a79fea6b3e77).
206 # since 1.9 (or a79fea6b3e77).
207 revlogopts = getattr(
207 revlogopts = getattr(
208 cmdutil,
208 cmdutil,
209 "debugrevlogopts",
209 "debugrevlogopts",
210 getattr(
210 getattr(
211 commands,
211 commands,
212 "debugrevlogopts",
212 "debugrevlogopts",
213 [
213 [
214 (b'c', b'changelog', False, b'open changelog'),
214 (b'c', b'changelog', False, b'open changelog'),
215 (b'm', b'manifest', False, b'open manifest'),
215 (b'm', b'manifest', False, b'open manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
217 ],
217 ],
218 ),
218 ),
219 )
219 )
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b"name|alias1|alias2" command spec into its name list.

    Local copy of cmdutil.parsealiases, which has only been available
    since 1.5 (or 6252852b4332).
    """
    separator = b"|"
    return cmd.split(separator)
228
228
229
229
# pick the best available @command decorator:
# registrar.command (3.7+), cmdutil.command (1.9+), or a local fallback
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending the aliases to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # register directly into cmdtable; synopsis is optional
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
263 try:
263 try:
264 import mercurial.registrar
264 import mercurial.registrar
265 import mercurial.configitems
265 import mercurial.configitems
266
266
267 configtable = {}
267 configtable = {}
268 configitem = mercurial.registrar.configitem(configtable)
268 configitem = mercurial.registrar.configitem(configtable)
269 configitem(
269 configitem(
270 b'perf',
270 b'perf',
271 b'presleep',
271 b'presleep',
272 default=mercurial.configitems.dynamicdefault,
272 default=mercurial.configitems.dynamicdefault,
273 experimental=True,
273 experimental=True,
274 )
274 )
275 configitem(
275 configitem(
276 b'perf',
276 b'perf',
277 b'stub',
277 b'stub',
278 default=mercurial.configitems.dynamicdefault,
278 default=mercurial.configitems.dynamicdefault,
279 experimental=True,
279 experimental=True,
280 )
280 )
281 configitem(
281 configitem(
282 b'perf',
282 b'perf',
283 b'parentscount',
283 b'parentscount',
284 default=mercurial.configitems.dynamicdefault,
284 default=mercurial.configitems.dynamicdefault,
285 experimental=True,
285 experimental=True,
286 )
286 )
287 configitem(
287 configitem(
288 b'perf',
288 b'perf',
289 b'all-timing',
289 b'all-timing',
290 default=mercurial.configitems.dynamicdefault,
290 default=mercurial.configitems.dynamicdefault,
291 experimental=True,
291 experimental=True,
292 )
292 )
293 configitem(
293 configitem(
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
295 )
295 )
296 configitem(
296 configitem(
297 b'perf',
297 b'perf',
298 b'profile-benchmark',
298 b'profile-benchmark',
299 default=mercurial.configitems.dynamicdefault,
299 default=mercurial.configitems.dynamicdefault,
300 )
300 )
301 configitem(
301 configitem(
302 b'perf',
302 b'perf',
303 b'run-limits',
303 b'run-limits',
304 default=mercurial.configitems.dynamicdefault,
304 default=mercurial.configitems.dynamicdefault,
305 experimental=True,
305 experimental=True,
306 )
306 )
307 except (ImportError, AttributeError):
307 except (ImportError, AttributeError):
308 pass
308 pass
309 except TypeError:
309 except TypeError:
310 # compatibility fix for a11fd395e83f
310 # compatibility fix for a11fd395e83f
311 # hg version: 5.2
311 # hg version: 5.2
312 configitem(
312 configitem(
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
314 )
314 )
315 configitem(
315 configitem(
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
317 )
317 )
318 configitem(
318 configitem(
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
320 )
320 )
321 configitem(
321 configitem(
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
323 )
323 )
324 configitem(
324 configitem(
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
326 )
326 )
327 configitem(
327 configitem(
328 b'perf',
328 b'perf',
329 b'profile-benchmark',
329 b'profile-benchmark',
330 default=mercurial.configitems.dynamicdefault,
330 default=mercurial.configitems.dynamicdefault,
331 )
331 )
332 configitem(
332 configitem(
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
334 )
334 )
335
335
336
336
def getlen(ui):
    """Return the function used to measure collection sizes.

    When the experimental perf.stub config is set (used for testing per
    the module docstring), every collection is reported as length 1 so
    stub runs stay trivially fast; otherwise the builtin len is used.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
341
341
342
342
class noop(object):
    """Context manager that does nothing on enter or exit.

    Stands in for the profiler context when profiling is disabled
    (see _timer, which swaps in NOOPCTX after the first iteration).
    """

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None


NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<runcount>'; malformed entries are
    # skipped with a warning rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # only the first measured iteration is profiled (see _timer)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    # experimental config: perf.pre-run (warm-up calls, not measured)
    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """Degenerate timer for perf.stub mode: run *func* exactly once.

    Accepts the same setup/title keywords as _timer so gettimer can
    substitute it via functools.partial; *fm* and *title* are unused
    and nothing is measured or reported.
    """
    steps = [func] if setup is None else [setup, func]
    for step in steps:
        step()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Time the enclosed block; yields a list that receives the result.

    On exit, appends a single (wall, user, sys) tuple to the yielded
    list: wall-clock delta from util.timer and user/system CPU deltas
    from os.times().
    """
    measurement = []
    os_before = os.times()
    clock_before = util.timer()
    yield measurement
    clock_after = util.timer()
    os_after = os.times()
    wall = clock_after - clock_before
    user = os_after[0] - os_before[0]
    system = os_after[1] - os_before[1]
    measurement.append((wall, user, system))
497
497
498
498
# list of stop condition (elapsed time, minimal run count)
# a benchmark stops at the first pair whose thresholds are BOTH met,
# checked in order after each measured iteration (see _timer)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly time calls of *func* and report through formatter *fm*.

    *setup*, when given, runs (untimed) before every call of *func*.
    Iteration stops at the first (elapsed, mincount) pair in *limits*
    whose thresholds are both met.  Only the first measured iteration
    runs under *profiler*; *prerun* extra calls are made beforehand as
    unmeasured warm-up.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up calls: executed but never measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r holds the last call's return value, shown as "! result: ..."
    formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place (wall time first).  Only the best run is shown unless
    *displayall* is set, in which case max, avg and median rows are
    added with role-prefixed field names.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, sample):
        # the "best" row keeps bare field names; other roles get a prefix
        prefix = b'' if role == b'best' else b'%s.' % role
        wall, user, system = sample[0], sample[1], sample[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        _show(b'median', timings[len(timings) // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read an integer config value, returning *default* when unset.

    Local replacement for ui.configint, which has only been available
    since 1.9 (or fa2b596db182).  Raises error.ConfigError when the
    configured value is not a valid integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    # remember the current value so it can be restored later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # tiny handle exposing set/restore for the wrapped attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
def getbranchmapsubsettable():
    """Locate the `subsettable` mapping wherever this Mercurial defines it.

    for "historical portability", subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        subsettable = getattr(candidate, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older versions exposed the equivalent object as `sopener`.
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older versions exposed the equivalent object as `opener`.
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # setattr(repo, '_tagscache', None) or similar is not a correct
        # way to clear the tags cache: existing code paths expect
        # _tagscache to be a structured object, so drop the cached
        # entry from __dict__ instead.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
def clearfilecache(obj, attrname):
    """Drop the filecache'd value of ``attrname`` on ``obj``.

    The filecache actually lives on the unfiltered repository, so switch
    to it when ``obj`` supports that.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    """Invalidate any cached changelog on ``repo``.

    On a filtered repo the changelog cache key/value pair is reset first,
    then the filecache entry is dropped on the unfiltered repo where it
    actually lives.
    """
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # Benchmark a full dirstate walk over the working copy.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walk():
        walked = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walked))

    timer(walk)
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # Benchmark annotating file `f` at the working copy parent.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def annotate():
        return len(fctx.annotate(True))

    timer(annotate)
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    lookunknown = opts[b'unknown']

    def run():
        # summing the component lengths forces full materialization
        st = repo.status(unknown=lookunknown)
        return sum(len(component) for component in st)

    timer(run)
    fm.end()
781
781
782
782
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # Benchmark a dry-run `addremove` over the whole working copy.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Read `quiet` before entering the try block: the original code
    # assigned it inside `try`, so a failure on that very line would
    # have triggered a NameError in the `finally` clause.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # the uipathfn argument was introduced later; pass it when
            # this Mercurial supports it
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
800
800
801
801
def clearcaches(cl):
    """Clear a revlog's lookup caches, across historical internal APIs."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node->rev lookup structures by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
811
811
812
812
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def reset():
        # drop lookup caches so each run recomputes from scratch
        clearcaches(cl)

    def run():
        return len(cl.headrevs())

    timer(run, setup=reset)
    fm.end()
828
828
829
829
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    # Benchmark computing the repository's tags.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def reset():
        # optionally drop changelog/manifest so their (re)parsing is
        # part of the measured work
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=reset)
    fm.end()
852
852
853
853
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # Benchmark iterating over all ancestors of the current heads.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def walk():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(walk)
    fm.end()
866
866
867
867
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # Benchmark membership tests of `revset` against a lazy ancestor set.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def check():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            # the membership test itself is what we are timing
            rev in ancestors

    timer(check)
    fm.end()
882
882
883
883
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # NOTE(review): unlike the other perf commands, `opts` is not run
    # through _byteskwargs() here — confirm whether the formatter
    # options are intentionally ignored for this command.
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def fresh_peer():
        # a new peer each run, so no connection state is reused
        repos[1] = hg.peer(ui, opts, path)

    def run():
        setdiscovery.findcommonheads(ui, *repos)

    timer(run, setup=fresh_peer)
    fm.end()
900
900
901
901
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearrevlogs = opts[b'clear_revlogs']

    def reset():
        # optionally refresh the changelog and always drop the cached
        # bookmark store, so each run parses it anew
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def load():
        repo._bookmarks

    timer(load, setup=reset)
    fm.end()
924
924
925
925
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # time `fn` against a freshly opened bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # time draining the bundle stream `size` bytes at a time
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads, no bundle machinery involved
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # time reading every part's payload `size` bytes at a time
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # peek at the bundle once to pick the benchmarks relevant to its type
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for testfn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(testfn, title=title)
        fm.end()
1050
1050
1051
1051
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # drain the generator; generation is what we time
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
1087
1087
1088
1088
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # Benchmark rebuilding the dirstate's directory cache (`_dirs`).
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate and its `_dirs` cache before timing
    b'a' in dirstate
    dirstate.hasdir(b'a')

    def setup():
        # Drop the cached directory structure outside of the timed
        # section: the previous code deleted it inside `d()`, so the
        # teardown cost polluted the measurement.
        del dirstate._map._dirs

    def d():
        dirstate.hasdir(b'a')

    timer(d, setup=setup)
    fm.end()
1102
1102
1103
1103
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark the time necessary to load a dirstate from scratch

    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    # (docstring typos fixed: "benchmap" -> "benchmark", "were" -> "where")
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm anything needed outside of the timed section
    b"a" in repo.dirstate

    def setup():
        # force the next "contains" check to reload from disk
        repo.dirstate.invalidate()

    def d():
        b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1123
1123
1124
1124
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    # (docstring typo fixed: "benchmap" -> "benchmark")
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate and its `_dirs` cache before timing
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory structure outside of the timed
        # section, so only its recomputation is measured
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1139
1141
1140
1142
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    # Benchmark building the dirstate's file fold map.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate and build the fold map once, so the first
    # `setup()` call has something to delete
    b'a' in dirstate
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # Drop the cached fold map outside of the timed section: the
        # previous code deleted it inside `d()`, so the teardown cost
        # polluted the measurement.
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1154
1156
1155
1157
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    # Benchmark building the dirstate's directory fold map.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate and build the fold map (and underlying `_dirs`)
    # once, so the first `setup()` call has something to delete
    b'a' in dirstate
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # Drop the cached structures outside of the timed section: the
        # previous code deleted them inside `d()`, so the teardown cost
        # polluted the measurement.
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1170
1172
1171
1173
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    # Benchmark writing the dirstate back to disk.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing
    b"a" in ds

    def write():
        # flag the dirstate as modified so write() actually writes
        ds._dirty = True
        ds.write(repo.currenttransaction())

    timer(write)
    fm.end()
1185
1187
1186
1188
def _getmergerevs(repo, opts):
    """parse command arguments to return revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    # (docstring typos fixed: "dictionnary" -> "dictionary", "bse" -> "base")
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
    # we don't want working dir files to be stat'd in the benchmark, so
    # prime that cache
    wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        baserev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[baserev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1208
1210
1209
1211
1210 @command(
1212 @command(
1211 b'perfmergecalculate',
1213 b'perfmergecalculate',
1212 [
1214 [
1213 (b'r', b'rev', b'.', b'rev to merge against'),
1215 (b'r', b'rev', b'.', b'rev to merge against'),
1214 (b'', b'from', b'', b'rev to merge from'),
1216 (b'', b'from', b'', b'rev to merge from'),
1215 (b'', b'base', b'', b'the revision to use as base'),
1217 (b'', b'base', b'', b'the revision to use as base'),
1216 ]
1218 ]
1217 + formatteropts,
1219 + formatteropts,
1218 )
1220 )
1219 def perfmergecalculate(ui, repo, **opts):
1221 def perfmergecalculate(ui, repo, **opts):
1220 opts = _byteskwargs(opts)
1222 opts = _byteskwargs(opts)
1221 timer, fm = gettimer(ui, opts)
1223 timer, fm = gettimer(ui, opts)
1222
1224
1223 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1225 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1224
1226
1225 def d():
1227 def d():
1226 # acceptremote is True because we don't want prompts in the middle of
1228 # acceptremote is True because we don't want prompts in the middle of
1227 # our benchmark
1229 # our benchmark
1228 merge.calculateupdates(
1230 merge.calculateupdates(
1229 repo,
1231 repo,
1230 wctx,
1232 wctx,
1231 rctx,
1233 rctx,
1232 [ancestor],
1234 [ancestor],
1233 branchmerge=False,
1235 branchmerge=False,
1234 force=False,
1236 force=False,
1235 acceptremote=True,
1237 acceptremote=True,
1236 followcopies=True,
1238 followcopies=True,
1237 )
1239 )
1238
1240
1239 timer(d)
1241 timer(d)
1240 fm.end()
1242 fm.end()
1241
1243
1242
1244
1243 @command(
1245 @command(
1244 b'perfmergecopies',
1246 b'perfmergecopies',
1245 [
1247 [
1246 (b'r', b'rev', b'.', b'rev to merge against'),
1248 (b'r', b'rev', b'.', b'rev to merge against'),
1247 (b'', b'from', b'', b'rev to merge from'),
1249 (b'', b'from', b'', b'rev to merge from'),
1248 (b'', b'base', b'', b'the revision to use as base'),
1250 (b'', b'base', b'', b'the revision to use as base'),
1249 ]
1251 ]
1250 + formatteropts,
1252 + formatteropts,
1251 )
1253 )
1252 def perfmergecopies(ui, repo, **opts):
1254 def perfmergecopies(ui, repo, **opts):
1253 """measure runtime of `copies.mergecopies`"""
1255 """measure runtime of `copies.mergecopies`"""
1254 opts = _byteskwargs(opts)
1256 opts = _byteskwargs(opts)
1255 timer, fm = gettimer(ui, opts)
1257 timer, fm = gettimer(ui, opts)
1256 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1258 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1257
1259
1258 def d():
1260 def d():
1259 # acceptremote is True because we don't want prompts in the middle of
1261 # acceptremote is True because we don't want prompts in the middle of
1260 # our benchmark
1262 # our benchmark
1261 copies.mergecopies(repo, wctx, rctx, ancestor)
1263 copies.mergecopies(repo, wctx, rctx, ancestor)
1262
1264
1263 timer(d)
1265 timer(d)
1264 fm.end()
1266 fm.end()
1265
1267
1266
1268
1267 @command(b'perfpathcopies', [], b"REV REV")
1269 @command(b'perfpathcopies', [], b"REV REV")
1268 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1270 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1269 """benchmark the copy tracing logic"""
1271 """benchmark the copy tracing logic"""
1270 opts = _byteskwargs(opts)
1272 opts = _byteskwargs(opts)
1271 timer, fm = gettimer(ui, opts)
1273 timer, fm = gettimer(ui, opts)
1272 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1274 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1273 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1275 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1274
1276
1275 def d():
1277 def d():
1276 copies.pathcopies(ctx1, ctx2)
1278 copies.pathcopies(ctx1, ctx2)
1277
1279
1278 timer(d)
1280 timer(d)
1279 fm.end()
1281 fm.end()
1280
1282
1281
1283
1282 @command(
1284 @command(
1283 b'perfphases',
1285 b'perfphases',
1284 [(b'', b'full', False, b'include file reading time too'),],
1286 [(b'', b'full', False, b'include file reading time too'),],
1285 b"",
1287 b"",
1286 )
1288 )
1287 def perfphases(ui, repo, **opts):
1289 def perfphases(ui, repo, **opts):
1288 """benchmark phasesets computation"""
1290 """benchmark phasesets computation"""
1289 opts = _byteskwargs(opts)
1291 opts = _byteskwargs(opts)
1290 timer, fm = gettimer(ui, opts)
1292 timer, fm = gettimer(ui, opts)
1291 _phases = repo._phasecache
1293 _phases = repo._phasecache
1292 full = opts.get(b'full')
1294 full = opts.get(b'full')
1293
1295
1294 def d():
1296 def d():
1295 phases = _phases
1297 phases = _phases
1296 if full:
1298 if full:
1297 clearfilecache(repo, b'_phasecache')
1299 clearfilecache(repo, b'_phasecache')
1298 phases = repo._phasecache
1300 phases = repo._phasecache
1299 phases.invalidate()
1301 phases.invalidate()
1300 phases.loadphaserevs(repo)
1302 phases.loadphaserevs(repo)
1301
1303
1302 timer(d)
1304 timer(d)
1303 fm.end()
1305 fm.end()
1304
1306
1305
1307
1306 @command(b'perfphasesremote', [], b"[DEST]")
1308 @command(b'perfphasesremote', [], b"[DEST]")
1307 def perfphasesremote(ui, repo, dest=None, **opts):
1309 def perfphasesremote(ui, repo, dest=None, **opts):
1308 """benchmark time needed to analyse phases of the remote server"""
1310 """benchmark time needed to analyse phases of the remote server"""
1309 from mercurial.node import bin
1311 from mercurial.node import bin
1310 from mercurial import (
1312 from mercurial import (
1311 exchange,
1313 exchange,
1312 hg,
1314 hg,
1313 phases,
1315 phases,
1314 )
1316 )
1315
1317
1316 opts = _byteskwargs(opts)
1318 opts = _byteskwargs(opts)
1317 timer, fm = gettimer(ui, opts)
1319 timer, fm = gettimer(ui, opts)
1318
1320
1319 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1321 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1320 if not path:
1322 if not path:
1321 raise error.Abort(
1323 raise error.Abort(
1322 b'default repository not configured!',
1324 b'default repository not configured!',
1323 hint=b"see 'hg help config.paths'",
1325 hint=b"see 'hg help config.paths'",
1324 )
1326 )
1325 dest = path.pushloc or path.loc
1327 dest = path.pushloc or path.loc
1326 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1328 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1327 other = hg.peer(repo, opts, dest)
1329 other = hg.peer(repo, opts, dest)
1328
1330
1329 # easier to perform discovery through the operation
1331 # easier to perform discovery through the operation
1330 op = exchange.pushoperation(repo, other)
1332 op = exchange.pushoperation(repo, other)
1331 exchange._pushdiscoverychangeset(op)
1333 exchange._pushdiscoverychangeset(op)
1332
1334
1333 remotesubset = op.fallbackheads
1335 remotesubset = op.fallbackheads
1334
1336
1335 with other.commandexecutor() as e:
1337 with other.commandexecutor() as e:
1336 remotephases = e.callcommand(
1338 remotephases = e.callcommand(
1337 b'listkeys', {b'namespace': b'phases'}
1339 b'listkeys', {b'namespace': b'phases'}
1338 ).result()
1340 ).result()
1339 del other
1341 del other
1340 publishing = remotephases.get(b'publishing', False)
1342 publishing = remotephases.get(b'publishing', False)
1341 if publishing:
1343 if publishing:
1342 ui.statusnoi18n(b'publishing: yes\n')
1344 ui.statusnoi18n(b'publishing: yes\n')
1343 else:
1345 else:
1344 ui.statusnoi18n(b'publishing: no\n')
1346 ui.statusnoi18n(b'publishing: no\n')
1345
1347
1346 nodemap = repo.changelog.nodemap
1348 nodemap = repo.changelog.nodemap
1347 nonpublishroots = 0
1349 nonpublishroots = 0
1348 for nhex, phase in remotephases.iteritems():
1350 for nhex, phase in remotephases.iteritems():
1349 if nhex == b'publishing': # ignore data related to publish option
1351 if nhex == b'publishing': # ignore data related to publish option
1350 continue
1352 continue
1351 node = bin(nhex)
1353 node = bin(nhex)
1352 if node in nodemap and int(phase):
1354 if node in nodemap and int(phase):
1353 nonpublishroots += 1
1355 nonpublishroots += 1
1354 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1356 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1355 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1357 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1356
1358
1357 def d():
1359 def d():
1358 phases.remotephasessummary(repo, remotesubset, remotephases)
1360 phases.remotephasessummary(repo, remotesubset, remotephases)
1359
1361
1360 timer(d)
1362 timer(d)
1361 fm.end()
1363 fm.end()
1362
1364
1363
1365
1364 @command(
1366 @command(
1365 b'perfmanifest',
1367 b'perfmanifest',
1366 [
1368 [
1367 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1369 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1368 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1370 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1369 ]
1371 ]
1370 + formatteropts,
1372 + formatteropts,
1371 b'REV|NODE',
1373 b'REV|NODE',
1372 )
1374 )
1373 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1375 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1374 """benchmark the time to read a manifest from disk and return a usable
1376 """benchmark the time to read a manifest from disk and return a usable
1375 dict-like object
1377 dict-like object
1376
1378
1377 Manifest caches are cleared before retrieval."""
1379 Manifest caches are cleared before retrieval."""
1378 opts = _byteskwargs(opts)
1380 opts = _byteskwargs(opts)
1379 timer, fm = gettimer(ui, opts)
1381 timer, fm = gettimer(ui, opts)
1380 if not manifest_rev:
1382 if not manifest_rev:
1381 ctx = scmutil.revsingle(repo, rev, rev)
1383 ctx = scmutil.revsingle(repo, rev, rev)
1382 t = ctx.manifestnode()
1384 t = ctx.manifestnode()
1383 else:
1385 else:
1384 from mercurial.node import bin
1386 from mercurial.node import bin
1385
1387
1386 if len(rev) == 40:
1388 if len(rev) == 40:
1387 t = bin(rev)
1389 t = bin(rev)
1388 else:
1390 else:
1389 try:
1391 try:
1390 rev = int(rev)
1392 rev = int(rev)
1391
1393
1392 if util.safehasattr(repo.manifestlog, b'getstorage'):
1394 if util.safehasattr(repo.manifestlog, b'getstorage'):
1393 t = repo.manifestlog.getstorage(b'').node(rev)
1395 t = repo.manifestlog.getstorage(b'').node(rev)
1394 else:
1396 else:
1395 t = repo.manifestlog._revlog.lookup(rev)
1397 t = repo.manifestlog._revlog.lookup(rev)
1396 except ValueError:
1398 except ValueError:
1397 raise error.Abort(
1399 raise error.Abort(
1398 b'manifest revision must be integer or full node'
1400 b'manifest revision must be integer or full node'
1399 )
1401 )
1400
1402
1401 def d():
1403 def d():
1402 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1404 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1403 repo.manifestlog[t].read()
1405 repo.manifestlog[t].read()
1404
1406
1405 timer(d)
1407 timer(d)
1406 fm.end()
1408 fm.end()
1407
1409
1408
1410
1409 @command(b'perfchangeset', formatteropts)
1411 @command(b'perfchangeset', formatteropts)
1410 def perfchangeset(ui, repo, rev, **opts):
1412 def perfchangeset(ui, repo, rev, **opts):
1411 opts = _byteskwargs(opts)
1413 opts = _byteskwargs(opts)
1412 timer, fm = gettimer(ui, opts)
1414 timer, fm = gettimer(ui, opts)
1413 n = scmutil.revsingle(repo, rev).node()
1415 n = scmutil.revsingle(repo, rev).node()
1414
1416
1415 def d():
1417 def d():
1416 repo.changelog.read(n)
1418 repo.changelog.read(n)
1417 # repo.changelog._cache = None
1419 # repo.changelog._cache = None
1418
1420
1419 timer(d)
1421 timer(d)
1420 fm.end()
1422 fm.end()
1421
1423
1422
1424
1423 @command(b'perfignore', formatteropts)
1425 @command(b'perfignore', formatteropts)
1424 def perfignore(ui, repo, **opts):
1426 def perfignore(ui, repo, **opts):
1425 """benchmark operation related to computing ignore"""
1427 """benchmark operation related to computing ignore"""
1426 opts = _byteskwargs(opts)
1428 opts = _byteskwargs(opts)
1427 timer, fm = gettimer(ui, opts)
1429 timer, fm = gettimer(ui, opts)
1428 dirstate = repo.dirstate
1430 dirstate = repo.dirstate
1429
1431
1430 def setupone():
1432 def setupone():
1431 dirstate.invalidate()
1433 dirstate.invalidate()
1432 clearfilecache(dirstate, b'_ignore')
1434 clearfilecache(dirstate, b'_ignore')
1433
1435
1434 def runone():
1436 def runone():
1435 dirstate._ignore
1437 dirstate._ignore
1436
1438
1437 timer(runone, setup=setupone, title=b"load")
1439 timer(runone, setup=setupone, title=b"load")
1438 fm.end()
1440 fm.end()
1439
1441
1440
1442
1441 @command(
1443 @command(
1442 b'perfindex',
1444 b'perfindex',
1443 [
1445 [
1444 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1446 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1445 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1447 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1446 ]
1448 ]
1447 + formatteropts,
1449 + formatteropts,
1448 )
1450 )
1449 def perfindex(ui, repo, **opts):
1451 def perfindex(ui, repo, **opts):
1450 """benchmark index creation time followed by a lookup
1452 """benchmark index creation time followed by a lookup
1451
1453
1452 The default is to look `tip` up. Depending on the index implementation,
1454 The default is to look `tip` up. Depending on the index implementation,
1453 the revision looked up can matters. For example, an implementation
1455 the revision looked up can matters. For example, an implementation
1454 scanning the index will have a faster lookup time for `--rev tip` than for
1456 scanning the index will have a faster lookup time for `--rev tip` than for
1455 `--rev 0`. The number of looked up revisions and their order can also
1457 `--rev 0`. The number of looked up revisions and their order can also
1456 matters.
1458 matters.
1457
1459
1458 Example of useful set to test:
1460 Example of useful set to test:
1459 * tip
1461 * tip
1460 * 0
1462 * 0
1461 * -10:
1463 * -10:
1462 * :10
1464 * :10
1463 * -10: + :10
1465 * -10: + :10
1464 * :10: + -10:
1466 * :10: + -10:
1465 * -10000:
1467 * -10000:
1466 * -10000: + 0
1468 * -10000: + 0
1467
1469
1468 It is not currently possible to check for lookup of a missing node. For
1470 It is not currently possible to check for lookup of a missing node. For
1469 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1471 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1470 import mercurial.revlog
1472 import mercurial.revlog
1471
1473
1472 opts = _byteskwargs(opts)
1474 opts = _byteskwargs(opts)
1473 timer, fm = gettimer(ui, opts)
1475 timer, fm = gettimer(ui, opts)
1474 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1476 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1475 if opts[b'no_lookup']:
1477 if opts[b'no_lookup']:
1476 if opts['rev']:
1478 if opts['rev']:
1477 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1479 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1478 nodes = []
1480 nodes = []
1479 elif not opts[b'rev']:
1481 elif not opts[b'rev']:
1480 nodes = [repo[b"tip"].node()]
1482 nodes = [repo[b"tip"].node()]
1481 else:
1483 else:
1482 revs = scmutil.revrange(repo, opts[b'rev'])
1484 revs = scmutil.revrange(repo, opts[b'rev'])
1483 cl = repo.changelog
1485 cl = repo.changelog
1484 nodes = [cl.node(r) for r in revs]
1486 nodes = [cl.node(r) for r in revs]
1485
1487
1486 unfi = repo.unfiltered()
1488 unfi = repo.unfiltered()
1487 # find the filecache func directly
1489 # find the filecache func directly
1488 # This avoid polluting the benchmark with the filecache logic
1490 # This avoid polluting the benchmark with the filecache logic
1489 makecl = unfi.__class__.changelog.func
1491 makecl = unfi.__class__.changelog.func
1490
1492
1491 def setup():
1493 def setup():
1492 # probably not necessary, but for good measure
1494 # probably not necessary, but for good measure
1493 clearchangelog(unfi)
1495 clearchangelog(unfi)
1494
1496
1495 def d():
1497 def d():
1496 cl = makecl(unfi)
1498 cl = makecl(unfi)
1497 for n in nodes:
1499 for n in nodes:
1498 cl.rev(n)
1500 cl.rev(n)
1499
1501
1500 timer(d, setup=setup)
1502 timer(d, setup=setup)
1501 fm.end()
1503 fm.end()
1502
1504
1503
1505
1504 @command(
1506 @command(
1505 b'perfnodemap',
1507 b'perfnodemap',
1506 [
1508 [
1507 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1509 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1508 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1510 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1509 ]
1511 ]
1510 + formatteropts,
1512 + formatteropts,
1511 )
1513 )
1512 def perfnodemap(ui, repo, **opts):
1514 def perfnodemap(ui, repo, **opts):
1513 """benchmark the time necessary to look up revision from a cold nodemap
1515 """benchmark the time necessary to look up revision from a cold nodemap
1514
1516
1515 Depending on the implementation, the amount and order of revision we look
1517 Depending on the implementation, the amount and order of revision we look
1516 up can varies. Example of useful set to test:
1518 up can varies. Example of useful set to test:
1517 * tip
1519 * tip
1518 * 0
1520 * 0
1519 * -10:
1521 * -10:
1520 * :10
1522 * :10
1521 * -10: + :10
1523 * -10: + :10
1522 * :10: + -10:
1524 * :10: + -10:
1523 * -10000:
1525 * -10000:
1524 * -10000: + 0
1526 * -10000: + 0
1525
1527
1526 The command currently focus on valid binary lookup. Benchmarking for
1528 The command currently focus on valid binary lookup. Benchmarking for
1527 hexlookup, prefix lookup and missing lookup would also be valuable.
1529 hexlookup, prefix lookup and missing lookup would also be valuable.
1528 """
1530 """
1529 import mercurial.revlog
1531 import mercurial.revlog
1530
1532
1531 opts = _byteskwargs(opts)
1533 opts = _byteskwargs(opts)
1532 timer, fm = gettimer(ui, opts)
1534 timer, fm = gettimer(ui, opts)
1533 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1535 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1534
1536
1535 unfi = repo.unfiltered()
1537 unfi = repo.unfiltered()
1536 clearcaches = opts['clear_caches']
1538 clearcaches = opts['clear_caches']
1537 # find the filecache func directly
1539 # find the filecache func directly
1538 # This avoid polluting the benchmark with the filecache logic
1540 # This avoid polluting the benchmark with the filecache logic
1539 makecl = unfi.__class__.changelog.func
1541 makecl = unfi.__class__.changelog.func
1540 if not opts[b'rev']:
1542 if not opts[b'rev']:
1541 raise error.Abort('use --rev to specify revisions to look up')
1543 raise error.Abort('use --rev to specify revisions to look up')
1542 revs = scmutil.revrange(repo, opts[b'rev'])
1544 revs = scmutil.revrange(repo, opts[b'rev'])
1543 cl = repo.changelog
1545 cl = repo.changelog
1544 nodes = [cl.node(r) for r in revs]
1546 nodes = [cl.node(r) for r in revs]
1545
1547
1546 # use a list to pass reference to a nodemap from one closure to the next
1548 # use a list to pass reference to a nodemap from one closure to the next
1547 nodeget = [None]
1549 nodeget = [None]
1548
1550
1549 def setnodeget():
1551 def setnodeget():
1550 # probably not necessary, but for good measure
1552 # probably not necessary, but for good measure
1551 clearchangelog(unfi)
1553 clearchangelog(unfi)
1552 nodeget[0] = makecl(unfi).nodemap.get
1554 nodeget[0] = makecl(unfi).nodemap.get
1553
1555
1554 def d():
1556 def d():
1555 get = nodeget[0]
1557 get = nodeget[0]
1556 for n in nodes:
1558 for n in nodes:
1557 get(n)
1559 get(n)
1558
1560
1559 setup = None
1561 setup = None
1560 if clearcaches:
1562 if clearcaches:
1561
1563
1562 def setup():
1564 def setup():
1563 setnodeget()
1565 setnodeget()
1564
1566
1565 else:
1567 else:
1566 setnodeget()
1568 setnodeget()
1567 d() # prewarm the data structure
1569 d() # prewarm the data structure
1568 timer(d, setup=setup)
1570 timer(d, setup=setup)
1569 fm.end()
1571 fm.end()
1570
1572
1571
1573
1572 @command(b'perfstartup', formatteropts)
1574 @command(b'perfstartup', formatteropts)
1573 def perfstartup(ui, repo, **opts):
1575 def perfstartup(ui, repo, **opts):
1574 opts = _byteskwargs(opts)
1576 opts = _byteskwargs(opts)
1575 timer, fm = gettimer(ui, opts)
1577 timer, fm = gettimer(ui, opts)
1576
1578
1577 def d():
1579 def d():
1578 if os.name != r'nt':
1580 if os.name != r'nt':
1579 os.system(
1581 os.system(
1580 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1582 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1581 )
1583 )
1582 else:
1584 else:
1583 os.environ[r'HGRCPATH'] = r' '
1585 os.environ[r'HGRCPATH'] = r' '
1584 os.system(r"%s version -q > NUL" % sys.argv[0])
1586 os.system(r"%s version -q > NUL" % sys.argv[0])
1585
1587
1586 timer(d)
1588 timer(d)
1587 fm.end()
1589 fm.end()
1588
1590
1589
1591
1590 @command(b'perfparents', formatteropts)
1592 @command(b'perfparents', formatteropts)
1591 def perfparents(ui, repo, **opts):
1593 def perfparents(ui, repo, **opts):
1592 """benchmark the time necessary to fetch one changeset's parents.
1594 """benchmark the time necessary to fetch one changeset's parents.
1593
1595
1594 The fetch is done using the `node identifier`, traversing all object layers
1596 The fetch is done using the `node identifier`, traversing all object layers
1595 from the repository object. The first N revisions will be used for this
1597 from the repository object. The first N revisions will be used for this
1596 benchmark. N is controlled by the ``perf.parentscount`` config option
1598 benchmark. N is controlled by the ``perf.parentscount`` config option
1597 (default: 1000).
1599 (default: 1000).
1598 """
1600 """
1599 opts = _byteskwargs(opts)
1601 opts = _byteskwargs(opts)
1600 timer, fm = gettimer(ui, opts)
1602 timer, fm = gettimer(ui, opts)
1601 # control the number of commits perfparents iterates over
1603 # control the number of commits perfparents iterates over
1602 # experimental config: perf.parentscount
1604 # experimental config: perf.parentscount
1603 count = getint(ui, b"perf", b"parentscount", 1000)
1605 count = getint(ui, b"perf", b"parentscount", 1000)
1604 if len(repo.changelog) < count:
1606 if len(repo.changelog) < count:
1605 raise error.Abort(b"repo needs %d commits for this test" % count)
1607 raise error.Abort(b"repo needs %d commits for this test" % count)
1606 repo = repo.unfiltered()
1608 repo = repo.unfiltered()
1607 nl = [repo.changelog.node(i) for i in _xrange(count)]
1609 nl = [repo.changelog.node(i) for i in _xrange(count)]
1608
1610
1609 def d():
1611 def d():
1610 for n in nl:
1612 for n in nl:
1611 repo.changelog.parents(n)
1613 repo.changelog.parents(n)
1612
1614
1613 timer(d)
1615 timer(d)
1614 fm.end()
1616 fm.end()
1615
1617
1616
1618
1617 @command(b'perfctxfiles', formatteropts)
1619 @command(b'perfctxfiles', formatteropts)
1618 def perfctxfiles(ui, repo, x, **opts):
1620 def perfctxfiles(ui, repo, x, **opts):
1619 opts = _byteskwargs(opts)
1621 opts = _byteskwargs(opts)
1620 x = int(x)
1622 x = int(x)
1621 timer, fm = gettimer(ui, opts)
1623 timer, fm = gettimer(ui, opts)
1622
1624
1623 def d():
1625 def d():
1624 len(repo[x].files())
1626 len(repo[x].files())
1625
1627
1626 timer(d)
1628 timer(d)
1627 fm.end()
1629 fm.end()
1628
1630
1629
1631
1630 @command(b'perfrawfiles', formatteropts)
1632 @command(b'perfrawfiles', formatteropts)
1631 def perfrawfiles(ui, repo, x, **opts):
1633 def perfrawfiles(ui, repo, x, **opts):
1632 opts = _byteskwargs(opts)
1634 opts = _byteskwargs(opts)
1633 x = int(x)
1635 x = int(x)
1634 timer, fm = gettimer(ui, opts)
1636 timer, fm = gettimer(ui, opts)
1635 cl = repo.changelog
1637 cl = repo.changelog
1636
1638
1637 def d():
1639 def d():
1638 len(cl.read(x)[3])
1640 len(cl.read(x)[3])
1639
1641
1640 timer(d)
1642 timer(d)
1641 fm.end()
1643 fm.end()
1642
1644
1643
1645
1644 @command(b'perflookup', formatteropts)
1646 @command(b'perflookup', formatteropts)
1645 def perflookup(ui, repo, rev, **opts):
1647 def perflookup(ui, repo, rev, **opts):
1646 opts = _byteskwargs(opts)
1648 opts = _byteskwargs(opts)
1647 timer, fm = gettimer(ui, opts)
1649 timer, fm = gettimer(ui, opts)
1648 timer(lambda: len(repo.lookup(rev)))
1650 timer(lambda: len(repo.lookup(rev)))
1649 fm.end()
1651 fm.end()
1650
1652
1651
1653
1652 @command(
1654 @command(
1653 b'perflinelogedits',
1655 b'perflinelogedits',
1654 [
1656 [
1655 (b'n', b'edits', 10000, b'number of edits'),
1657 (b'n', b'edits', 10000, b'number of edits'),
1656 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1658 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1657 ],
1659 ],
1658 norepo=True,
1660 norepo=True,
1659 )
1661 )
1660 def perflinelogedits(ui, **opts):
1662 def perflinelogedits(ui, **opts):
1661 from mercurial import linelog
1663 from mercurial import linelog
1662
1664
1663 opts = _byteskwargs(opts)
1665 opts = _byteskwargs(opts)
1664
1666
1665 edits = opts[b'edits']
1667 edits = opts[b'edits']
1666 maxhunklines = opts[b'max_hunk_lines']
1668 maxhunklines = opts[b'max_hunk_lines']
1667
1669
1668 maxb1 = 100000
1670 maxb1 = 100000
1669 random.seed(0)
1671 random.seed(0)
1670 randint = random.randint
1672 randint = random.randint
1671 currentlines = 0
1673 currentlines = 0
1672 arglist = []
1674 arglist = []
1673 for rev in _xrange(edits):
1675 for rev in _xrange(edits):
1674 a1 = randint(0, currentlines)
1676 a1 = randint(0, currentlines)
1675 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1677 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1676 b1 = randint(0, maxb1)
1678 b1 = randint(0, maxb1)
1677 b2 = randint(b1, b1 + maxhunklines)
1679 b2 = randint(b1, b1 + maxhunklines)
1678 currentlines += (b2 - b1) - (a2 - a1)
1680 currentlines += (b2 - b1) - (a2 - a1)
1679 arglist.append((rev, a1, a2, b1, b2))
1681 arglist.append((rev, a1, a2, b1, b2))
1680
1682
1681 def d():
1683 def d():
1682 ll = linelog.linelog()
1684 ll = linelog.linelog()
1683 for args in arglist:
1685 for args in arglist:
1684 ll.replacelines(*args)
1686 ll.replacelines(*args)
1685
1687
1686 timer, fm = gettimer(ui, opts)
1688 timer, fm = gettimer(ui, opts)
1687 timer(d)
1689 timer(d)
1688 fm.end()
1690 fm.end()
1689
1691
1690
1692
1691 @command(b'perfrevrange', formatteropts)
1693 @command(b'perfrevrange', formatteropts)
1692 def perfrevrange(ui, repo, *specs, **opts):
1694 def perfrevrange(ui, repo, *specs, **opts):
1693 opts = _byteskwargs(opts)
1695 opts = _byteskwargs(opts)
1694 timer, fm = gettimer(ui, opts)
1696 timer, fm = gettimer(ui, opts)
1695 revrange = scmutil.revrange
1697 revrange = scmutil.revrange
1696 timer(lambda: len(revrange(repo, specs)))
1698 timer(lambda: len(revrange(repo, specs)))
1697 fm.end()
1699 fm.end()
1698
1700
1699
1701
1700 @command(b'perfnodelookup', formatteropts)
1702 @command(b'perfnodelookup', formatteropts)
1701 def perfnodelookup(ui, repo, rev, **opts):
1703 def perfnodelookup(ui, repo, rev, **opts):
1702 opts = _byteskwargs(opts)
1704 opts = _byteskwargs(opts)
1703 timer, fm = gettimer(ui, opts)
1705 timer, fm = gettimer(ui, opts)
1704 import mercurial.revlog
1706 import mercurial.revlog
1705
1707
1706 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1708 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1707 n = scmutil.revsingle(repo, rev).node()
1709 n = scmutil.revsingle(repo, rev).node()
1708 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1710 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1709
1711
1710 def d():
1712 def d():
1711 cl.rev(n)
1713 cl.rev(n)
1712 clearcaches(cl)
1714 clearcaches(cl)
1713
1715
1714 timer(d)
1716 timer(d)
1715 fm.end()
1717 fm.end()
1716
1718
1717
1719
1718 @command(
1720 @command(
1719 b'perflog',
1721 b'perflog',
1720 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1722 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1721 )
1723 )
1722 def perflog(ui, repo, rev=None, **opts):
1724 def perflog(ui, repo, rev=None, **opts):
1723 opts = _byteskwargs(opts)
1725 opts = _byteskwargs(opts)
1724 if rev is None:
1726 if rev is None:
1725 rev = []
1727 rev = []
1726 timer, fm = gettimer(ui, opts)
1728 timer, fm = gettimer(ui, opts)
1727 ui.pushbuffer()
1729 ui.pushbuffer()
1728 timer(
1730 timer(
1729 lambda: commands.log(
1731 lambda: commands.log(
1730 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1732 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1731 )
1733 )
1732 )
1734 )
1733 ui.popbuffer()
1735 ui.popbuffer()
1734 fm.end()
1736 fm.end()
1735
1737
1736
1738
1737 @command(b'perfmoonwalk', formatteropts)
1739 @command(b'perfmoonwalk', formatteropts)
1738 def perfmoonwalk(ui, repo, **opts):
1740 def perfmoonwalk(ui, repo, **opts):
1739 """benchmark walking the changelog backwards
1741 """benchmark walking the changelog backwards
1740
1742
1741 This also loads the changelog data for each revision in the changelog.
1743 This also loads the changelog data for each revision in the changelog.
1742 """
1744 """
1743 opts = _byteskwargs(opts)
1745 opts = _byteskwargs(opts)
1744 timer, fm = gettimer(ui, opts)
1746 timer, fm = gettimer(ui, opts)
1745
1747
1746 def moonwalk():
1748 def moonwalk():
1747 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1749 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1748 ctx = repo[i]
1750 ctx = repo[i]
1749 ctx.branch() # read changelog data (in addition to the index)
1751 ctx.branch() # read changelog data (in addition to the index)
1750
1752
1751 timer(moonwalk)
1753 timer(moonwalk)
1752 fm.end()
1754 fm.end()
1753
1755
1754
1756
1755 @command(
1757 @command(
1756 b'perftemplating',
1758 b'perftemplating',
1757 [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
1759 [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
1758 )
1760 )
1759 def perftemplating(ui, repo, testedtemplate=None, **opts):
1761 def perftemplating(ui, repo, testedtemplate=None, **opts):
1760 """test the rendering time of a given template"""
1762 """test the rendering time of a given template"""
1761 if makelogtemplater is None:
1763 if makelogtemplater is None:
1762 raise error.Abort(
1764 raise error.Abort(
1763 b"perftemplating not available with this Mercurial",
1765 b"perftemplating not available with this Mercurial",
1764 hint=b"use 4.3 or later",
1766 hint=b"use 4.3 or later",
1765 )
1767 )
1766
1768
1767 opts = _byteskwargs(opts)
1769 opts = _byteskwargs(opts)
1768
1770
1769 nullui = ui.copy()
1771 nullui = ui.copy()
1770 nullui.fout = open(os.devnull, r'wb')
1772 nullui.fout = open(os.devnull, r'wb')
1771 nullui.disablepager()
1773 nullui.disablepager()
1772 revs = opts.get(b'rev')
1774 revs = opts.get(b'rev')
1773 if not revs:
1775 if not revs:
1774 revs = [b'all()']
1776 revs = [b'all()']
1775 revs = list(scmutil.revrange(repo, revs))
1777 revs = list(scmutil.revrange(repo, revs))
1776
1778
1777 defaulttemplate = (
1779 defaulttemplate = (
1778 b'{date|shortdate} [{rev}:{node|short}]'
1780 b'{date|shortdate} [{rev}:{node|short}]'
1779 b' {author|person}: {desc|firstline}\n'
1781 b' {author|person}: {desc|firstline}\n'
1780 )
1782 )
1781 if testedtemplate is None:
1783 if testedtemplate is None:
1782 testedtemplate = defaulttemplate
1784 testedtemplate = defaulttemplate
1783 displayer = makelogtemplater(nullui, repo, testedtemplate)
1785 displayer = makelogtemplater(nullui, repo, testedtemplate)
1784
1786
1785 def format():
1787 def format():
1786 for r in revs:
1788 for r in revs:
1787 ctx = repo[r]
1789 ctx = repo[r]
1788 displayer.show(ctx)
1790 displayer.show(ctx)
1789 displayer.flush(ctx)
1791 displayer.flush(ctx)
1790
1792
1791 timer, fm = gettimer(ui, opts)
1793 timer, fm = gettimer(ui, opts)
1792 timer(format)
1794 timer(format)
1793 fm.end()
1795 fm.end()
1794
1796
1795
1797
1796 def _displaystats(ui, opts, entries, data):
1798 def _displaystats(ui, opts, entries, data):
1797 pass
1799 pass
1798 # use a second formatter because the data are quite different, not sure
1800 # use a second formatter because the data are quite different, not sure
1799 # how it flies with the templater.
1801 # how it flies with the templater.
1800 fm = ui.formatter(b'perf-stats', opts)
1802 fm = ui.formatter(b'perf-stats', opts)
1801 for key, title in entries:
1803 for key, title in entries:
1802 values = data[key]
1804 values = data[key]
1803 nbvalues = len(data)
1805 nbvalues = len(data)
1804 values.sort()
1806 values.sort()
1805 stats = {
1807 stats = {
1806 'key': key,
1808 'key': key,
1807 'title': title,
1809 'title': title,
1808 'nbitems': len(values),
1810 'nbitems': len(values),
1809 'min': values[0][0],
1811 'min': values[0][0],
1810 '10%': values[(nbvalues * 10) // 100][0],
1812 '10%': values[(nbvalues * 10) // 100][0],
1811 '25%': values[(nbvalues * 25) // 100][0],
1813 '25%': values[(nbvalues * 25) // 100][0],
1812 '50%': values[(nbvalues * 50) // 100][0],
1814 '50%': values[(nbvalues * 50) // 100][0],
1813 '75%': values[(nbvalues * 75) // 100][0],
1815 '75%': values[(nbvalues * 75) // 100][0],
1814 '80%': values[(nbvalues * 80) // 100][0],
1816 '80%': values[(nbvalues * 80) // 100][0],
1815 '85%': values[(nbvalues * 85) // 100][0],
1817 '85%': values[(nbvalues * 85) // 100][0],
1816 '90%': values[(nbvalues * 90) // 100][0],
1818 '90%': values[(nbvalues * 90) // 100][0],
1817 '95%': values[(nbvalues * 95) // 100][0],
1819 '95%': values[(nbvalues * 95) // 100][0],
1818 '99%': values[(nbvalues * 99) // 100][0],
1820 '99%': values[(nbvalues * 99) // 100][0],
1819 'max': values[-1][0],
1821 'max': values[-1][0],
1820 }
1822 }
1821 fm.startitem()
1823 fm.startitem()
1822 fm.data(**stats)
1824 fm.data(**stats)
1823 # make node pretty for the human output
1825 # make node pretty for the human output
1824 fm.plain('### %s (%d items)\n' % (title, len(values)))
1826 fm.plain('### %s (%d items)\n' % (title, len(values)))
1825 lines = [
1827 lines = [
1826 'min',
1828 'min',
1827 '10%',
1829 '10%',
1828 '25%',
1830 '25%',
1829 '50%',
1831 '50%',
1830 '75%',
1832 '75%',
1831 '80%',
1833 '80%',
1832 '85%',
1834 '85%',
1833 '90%',
1835 '90%',
1834 '95%',
1836 '95%',
1835 '99%',
1837 '99%',
1836 'max',
1838 'max',
1837 ]
1839 ]
1838 for l in lines:
1840 for l in lines:
1839 fm.plain('%s: %s\n' % (l, stats[l]))
1841 fm.plain('%s: %s\n' % (l, stats[l]))
1840 fm.end()
1842 fm.end()
1841
1843
1842
1844
1843 @command(
1845 @command(
1844 b'perfhelper-mergecopies',
1846 b'perfhelper-mergecopies',
1845 formatteropts
1847 formatteropts
1846 + [
1848 + [
1847 (b'r', b'revs', [], b'restrict search to these revisions'),
1849 (b'r', b'revs', [], b'restrict search to these revisions'),
1848 (b'', b'timing', False, b'provides extra data (costly)'),
1850 (b'', b'timing', False, b'provides extra data (costly)'),
1849 (b'', b'stats', False, b'provides statistic about the measured data'),
1851 (b'', b'stats', False, b'provides statistic about the measured data'),
1850 ],
1852 ],
1851 )
1853 )
1852 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1854 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1853 """find statistics about potential parameters for `perfmergecopies`
1855 """find statistics about potential parameters for `perfmergecopies`
1854
1856
1855 This command find (base, p1, p2) triplet relevant for copytracing
1857 This command find (base, p1, p2) triplet relevant for copytracing
1856 benchmarking in the context of a merge. It reports values for some of the
1858 benchmarking in the context of a merge. It reports values for some of the
1857 parameters that impact merge copy tracing time during merge.
1859 parameters that impact merge copy tracing time during merge.
1858
1860
1859 If `--timing` is set, rename detection is run and the associated timing
1861 If `--timing` is set, rename detection is run and the associated timing
1860 will be reported. The extra details come at the cost of slower command
1862 will be reported. The extra details come at the cost of slower command
1861 execution.
1863 execution.
1862
1864
1863 Since rename detection is only run once, other factors might easily
1865 Since rename detection is only run once, other factors might easily
1864 affect the precision of the timing. However it should give a good
1866 affect the precision of the timing. However it should give a good
1865 approximation of which revision triplets are very costly.
1867 approximation of which revision triplets are very costly.
1866 """
1868 """
1867 opts = _byteskwargs(opts)
1869 opts = _byteskwargs(opts)
1868 fm = ui.formatter(b'perf', opts)
1870 fm = ui.formatter(b'perf', opts)
1869 dotiming = opts[b'timing']
1871 dotiming = opts[b'timing']
1870 dostats = opts[b'stats']
1872 dostats = opts[b'stats']
1871
1873
1872 output_template = [
1874 output_template = [
1873 ("base", "%(base)12s"),
1875 ("base", "%(base)12s"),
1874 ("p1", "%(p1.node)12s"),
1876 ("p1", "%(p1.node)12s"),
1875 ("p2", "%(p2.node)12s"),
1877 ("p2", "%(p2.node)12s"),
1876 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1878 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1877 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1879 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1878 ("p1.renames", "%(p1.renamedfiles)12d"),
1880 ("p1.renames", "%(p1.renamedfiles)12d"),
1879 ("p1.time", "%(p1.time)12.3f"),
1881 ("p1.time", "%(p1.time)12.3f"),
1880 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1882 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1881 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1883 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1882 ("p2.renames", "%(p2.renamedfiles)12d"),
1884 ("p2.renames", "%(p2.renamedfiles)12d"),
1883 ("p2.time", "%(p2.time)12.3f"),
1885 ("p2.time", "%(p2.time)12.3f"),
1884 ("renames", "%(nbrenamedfiles)12d"),
1886 ("renames", "%(nbrenamedfiles)12d"),
1885 ("total.time", "%(time)12.3f"),
1887 ("total.time", "%(time)12.3f"),
1886 ]
1888 ]
1887 if not dotiming:
1889 if not dotiming:
1888 output_template = [
1890 output_template = [
1889 i
1891 i
1890 for i in output_template
1892 for i in output_template
1891 if not ('time' in i[0] or 'renames' in i[0])
1893 if not ('time' in i[0] or 'renames' in i[0])
1892 ]
1894 ]
1893 header_names = [h for (h, v) in output_template]
1895 header_names = [h for (h, v) in output_template]
1894 output = ' '.join([v for (h, v) in output_template]) + '\n'
1896 output = ' '.join([v for (h, v) in output_template]) + '\n'
1895 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1897 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1896 fm.plain(header % tuple(header_names))
1898 fm.plain(header % tuple(header_names))
1897
1899
1898 if not revs:
1900 if not revs:
1899 revs = ['all()']
1901 revs = ['all()']
1900 revs = scmutil.revrange(repo, revs)
1902 revs = scmutil.revrange(repo, revs)
1901
1903
1902 if dostats:
1904 if dostats:
1903 alldata = {
1905 alldata = {
1904 'nbrevs': [],
1906 'nbrevs': [],
1905 'nbmissingfiles': [],
1907 'nbmissingfiles': [],
1906 }
1908 }
1907 if dotiming:
1909 if dotiming:
1908 alldata['parentnbrenames'] = []
1910 alldata['parentnbrenames'] = []
1909 alldata['totalnbrenames'] = []
1911 alldata['totalnbrenames'] = []
1910 alldata['parenttime'] = []
1912 alldata['parenttime'] = []
1911 alldata['totaltime'] = []
1913 alldata['totaltime'] = []
1912
1914
1913 roi = repo.revs('merge() and %ld', revs)
1915 roi = repo.revs('merge() and %ld', revs)
1914 for r in roi:
1916 for r in roi:
1915 ctx = repo[r]
1917 ctx = repo[r]
1916 p1 = ctx.p1()
1918 p1 = ctx.p1()
1917 p2 = ctx.p2()
1919 p2 = ctx.p2()
1918 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1920 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1919 for b in bases:
1921 for b in bases:
1920 b = repo[b]
1922 b = repo[b]
1921 p1missing = copies._computeforwardmissing(b, p1)
1923 p1missing = copies._computeforwardmissing(b, p1)
1922 p2missing = copies._computeforwardmissing(b, p2)
1924 p2missing = copies._computeforwardmissing(b, p2)
1923 data = {
1925 data = {
1924 b'base': b.hex(),
1926 b'base': b.hex(),
1925 b'p1.node': p1.hex(),
1927 b'p1.node': p1.hex(),
1926 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1928 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1927 b'p1.nbmissingfiles': len(p1missing),
1929 b'p1.nbmissingfiles': len(p1missing),
1928 b'p2.node': p2.hex(),
1930 b'p2.node': p2.hex(),
1929 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1931 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1930 b'p2.nbmissingfiles': len(p2missing),
1932 b'p2.nbmissingfiles': len(p2missing),
1931 }
1933 }
1932 if dostats:
1934 if dostats:
1933 if p1missing:
1935 if p1missing:
1934 alldata['nbrevs'].append(
1936 alldata['nbrevs'].append(
1935 (data['p1.nbrevs'], b.hex(), p1.hex())
1937 (data['p1.nbrevs'], b.hex(), p1.hex())
1936 )
1938 )
1937 alldata['nbmissingfiles'].append(
1939 alldata['nbmissingfiles'].append(
1938 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
1940 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
1939 )
1941 )
1940 if p2missing:
1942 if p2missing:
1941 alldata['nbrevs'].append(
1943 alldata['nbrevs'].append(
1942 (data['p2.nbrevs'], b.hex(), p2.hex())
1944 (data['p2.nbrevs'], b.hex(), p2.hex())
1943 )
1945 )
1944 alldata['nbmissingfiles'].append(
1946 alldata['nbmissingfiles'].append(
1945 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
1947 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
1946 )
1948 )
1947 if dotiming:
1949 if dotiming:
1948 begin = util.timer()
1950 begin = util.timer()
1949 mergedata = copies.mergecopies(repo, p1, p2, b)
1951 mergedata = copies.mergecopies(repo, p1, p2, b)
1950 end = util.timer()
1952 end = util.timer()
1951 # not very stable timing since we did only one run
1953 # not very stable timing since we did only one run
1952 data['time'] = end - begin
1954 data['time'] = end - begin
1953 # mergedata contains five dicts: "copy", "movewithdir",
1955 # mergedata contains five dicts: "copy", "movewithdir",
1954 # "diverge", "renamedelete" and "dirmove".
1956 # "diverge", "renamedelete" and "dirmove".
1955 # The first 4 are about renamed file so lets count that.
1957 # The first 4 are about renamed file so lets count that.
1956 renames = len(mergedata[0])
1958 renames = len(mergedata[0])
1957 renames += len(mergedata[1])
1959 renames += len(mergedata[1])
1958 renames += len(mergedata[2])
1960 renames += len(mergedata[2])
1959 renames += len(mergedata[3])
1961 renames += len(mergedata[3])
1960 data['nbrenamedfiles'] = renames
1962 data['nbrenamedfiles'] = renames
1961 begin = util.timer()
1963 begin = util.timer()
1962 p1renames = copies.pathcopies(b, p1)
1964 p1renames = copies.pathcopies(b, p1)
1963 end = util.timer()
1965 end = util.timer()
1964 data['p1.time'] = end - begin
1966 data['p1.time'] = end - begin
1965 begin = util.timer()
1967 begin = util.timer()
1966 p2renames = copies.pathcopies(b, p2)
1968 p2renames = copies.pathcopies(b, p2)
1967 data['p2.time'] = end - begin
1969 data['p2.time'] = end - begin
1968 end = util.timer()
1970 end = util.timer()
1969 data['p1.renamedfiles'] = len(p1renames)
1971 data['p1.renamedfiles'] = len(p1renames)
1970 data['p2.renamedfiles'] = len(p2renames)
1972 data['p2.renamedfiles'] = len(p2renames)
1971
1973
1972 if dostats:
1974 if dostats:
1973 if p1missing:
1975 if p1missing:
1974 alldata['parentnbrenames'].append(
1976 alldata['parentnbrenames'].append(
1975 (data['p1.renamedfiles'], b.hex(), p1.hex())
1977 (data['p1.renamedfiles'], b.hex(), p1.hex())
1976 )
1978 )
1977 alldata['parenttime'].append(
1979 alldata['parenttime'].append(
1978 (data['p1.time'], b.hex(), p1.hex())
1980 (data['p1.time'], b.hex(), p1.hex())
1979 )
1981 )
1980 if p2missing:
1982 if p2missing:
1981 alldata['parentnbrenames'].append(
1983 alldata['parentnbrenames'].append(
1982 (data['p2.renamedfiles'], b.hex(), p2.hex())
1984 (data['p2.renamedfiles'], b.hex(), p2.hex())
1983 )
1985 )
1984 alldata['parenttime'].append(
1986 alldata['parenttime'].append(
1985 (data['p2.time'], b.hex(), p2.hex())
1987 (data['p2.time'], b.hex(), p2.hex())
1986 )
1988 )
1987 if p1missing or p2missing:
1989 if p1missing or p2missing:
1988 alldata['totalnbrenames'].append(
1990 alldata['totalnbrenames'].append(
1989 (
1991 (
1990 data['nbrenamedfiles'],
1992 data['nbrenamedfiles'],
1991 b.hex(),
1993 b.hex(),
1992 p1.hex(),
1994 p1.hex(),
1993 p2.hex(),
1995 p2.hex(),
1994 )
1996 )
1995 )
1997 )
1996 alldata['totaltime'].append(
1998 alldata['totaltime'].append(
1997 (data['time'], b.hex(), p1.hex(), p2.hex())
1999 (data['time'], b.hex(), p1.hex(), p2.hex())
1998 )
2000 )
1999 fm.startitem()
2001 fm.startitem()
2000 fm.data(**data)
2002 fm.data(**data)
2001 # make node pretty for the human output
2003 # make node pretty for the human output
2002 out = data.copy()
2004 out = data.copy()
2003 out['base'] = fm.hexfunc(b.node())
2005 out['base'] = fm.hexfunc(b.node())
2004 out['p1.node'] = fm.hexfunc(p1.node())
2006 out['p1.node'] = fm.hexfunc(p1.node())
2005 out['p2.node'] = fm.hexfunc(p2.node())
2007 out['p2.node'] = fm.hexfunc(p2.node())
2006 fm.plain(output % out)
2008 fm.plain(output % out)
2007
2009
2008 fm.end()
2010 fm.end()
2009 if dostats:
2011 if dostats:
2010 # use a second formatter because the data are quite different, not sure
2012 # use a second formatter because the data are quite different, not sure
2011 # how it flies with the templater.
2013 # how it flies with the templater.
2012 entries = [
2014 entries = [
2013 ('nbrevs', 'number of revision covered'),
2015 ('nbrevs', 'number of revision covered'),
2014 ('nbmissingfiles', 'number of missing files at head'),
2016 ('nbmissingfiles', 'number of missing files at head'),
2015 ]
2017 ]
2016 if dotiming:
2018 if dotiming:
2017 entries.append(
2019 entries.append(
2018 ('parentnbrenames', 'rename from one parent to base')
2020 ('parentnbrenames', 'rename from one parent to base')
2019 )
2021 )
2020 entries.append(('totalnbrenames', 'total number of renames'))
2022 entries.append(('totalnbrenames', 'total number of renames'))
2021 entries.append(('parenttime', 'time for one parent'))
2023 entries.append(('parenttime', 'time for one parent'))
2022 entries.append(('totaltime', 'time for both parents'))
2024 entries.append(('totaltime', 'time for both parents'))
2023 _displaystats(ui, opts, entries, alldata)
2025 _displaystats(ui, opts, entries, alldata)
2024
2026
2025
2027
2026 @command(
2028 @command(
2027 b'perfhelper-pathcopies',
2029 b'perfhelper-pathcopies',
2028 formatteropts
2030 formatteropts
2029 + [
2031 + [
2030 (b'r', b'revs', [], b'restrict search to these revisions'),
2032 (b'r', b'revs', [], b'restrict search to these revisions'),
2031 (b'', b'timing', False, b'provides extra data (costly)'),
2033 (b'', b'timing', False, b'provides extra data (costly)'),
2032 (b'', b'stats', False, b'provides statistic about the measured data'),
2034 (b'', b'stats', False, b'provides statistic about the measured data'),
2033 ],
2035 ],
2034 )
2036 )
2035 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2037 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2036 """find statistic about potential parameters for the `perftracecopies`
2038 """find statistic about potential parameters for the `perftracecopies`
2037
2039
2038 This command find source-destination pair relevant for copytracing testing.
2040 This command find source-destination pair relevant for copytracing testing.
2039 It report value for some of the parameters that impact copy tracing time.
2041 It report value for some of the parameters that impact copy tracing time.
2040
2042
2041 If `--timing` is set, rename detection is run and the associated timing
2043 If `--timing` is set, rename detection is run and the associated timing
2042 will be reported. The extra details comes at the cost of a slower command
2044 will be reported. The extra details comes at the cost of a slower command
2043 execution.
2045 execution.
2044
2046
2045 Since the rename detection is only run once, other factors might easily
2047 Since the rename detection is only run once, other factors might easily
2046 affect the precision of the timing. However it should give a good
2048 affect the precision of the timing. However it should give a good
2047 approximation of which revision pairs are very costly.
2049 approximation of which revision pairs are very costly.
2048 """
2050 """
2049 opts = _byteskwargs(opts)
2051 opts = _byteskwargs(opts)
2050 fm = ui.formatter(b'perf', opts)
2052 fm = ui.formatter(b'perf', opts)
2051 dotiming = opts[b'timing']
2053 dotiming = opts[b'timing']
2052 dostats = opts[b'stats']
2054 dostats = opts[b'stats']
2053
2055
2054 if dotiming:
2056 if dotiming:
2055 header = '%12s %12s %12s %12s %12s %12s\n'
2057 header = '%12s %12s %12s %12s %12s %12s\n'
2056 output = (
2058 output = (
2057 "%(source)12s %(destination)12s "
2059 "%(source)12s %(destination)12s "
2058 "%(nbrevs)12d %(nbmissingfiles)12d "
2060 "%(nbrevs)12d %(nbmissingfiles)12d "
2059 "%(nbrenamedfiles)12d %(time)18.5f\n"
2061 "%(nbrenamedfiles)12d %(time)18.5f\n"
2060 )
2062 )
2061 header_names = (
2063 header_names = (
2062 "source",
2064 "source",
2063 "destination",
2065 "destination",
2064 "nb-revs",
2066 "nb-revs",
2065 "nb-files",
2067 "nb-files",
2066 "nb-renames",
2068 "nb-renames",
2067 "time",
2069 "time",
2068 )
2070 )
2069 fm.plain(header % header_names)
2071 fm.plain(header % header_names)
2070 else:
2072 else:
2071 header = '%12s %12s %12s %12s\n'
2073 header = '%12s %12s %12s %12s\n'
2072 output = (
2074 output = (
2073 "%(source)12s %(destination)12s "
2075 "%(source)12s %(destination)12s "
2074 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2076 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2075 )
2077 )
2076 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2078 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2077
2079
2078 if not revs:
2080 if not revs:
2079 revs = ['all()']
2081 revs = ['all()']
2080 revs = scmutil.revrange(repo, revs)
2082 revs = scmutil.revrange(repo, revs)
2081
2083
2082 if dostats:
2084 if dostats:
2083 alldata = {
2085 alldata = {
2084 'nbrevs': [],
2086 'nbrevs': [],
2085 'nbmissingfiles': [],
2087 'nbmissingfiles': [],
2086 }
2088 }
2087 if dotiming:
2089 if dotiming:
2088 alldata['nbrenames'] = []
2090 alldata['nbrenames'] = []
2089 alldata['time'] = []
2091 alldata['time'] = []
2090
2092
2091 roi = repo.revs('merge() and %ld', revs)
2093 roi = repo.revs('merge() and %ld', revs)
2092 for r in roi:
2094 for r in roi:
2093 ctx = repo[r]
2095 ctx = repo[r]
2094 p1 = ctx.p1().rev()
2096 p1 = ctx.p1().rev()
2095 p2 = ctx.p2().rev()
2097 p2 = ctx.p2().rev()
2096 bases = repo.changelog._commonancestorsheads(p1, p2)
2098 bases = repo.changelog._commonancestorsheads(p1, p2)
2097 for p in (p1, p2):
2099 for p in (p1, p2):
2098 for b in bases:
2100 for b in bases:
2099 base = repo[b]
2101 base = repo[b]
2100 parent = repo[p]
2102 parent = repo[p]
2101 missing = copies._computeforwardmissing(base, parent)
2103 missing = copies._computeforwardmissing(base, parent)
2102 if not missing:
2104 if not missing:
2103 continue
2105 continue
2104 data = {
2106 data = {
2105 b'source': base.hex(),
2107 b'source': base.hex(),
2106 b'destination': parent.hex(),
2108 b'destination': parent.hex(),
2107 b'nbrevs': len(repo.revs('%d::%d', b, p)),
2109 b'nbrevs': len(repo.revs('%d::%d', b, p)),
2108 b'nbmissingfiles': len(missing),
2110 b'nbmissingfiles': len(missing),
2109 }
2111 }
2110 if dostats:
2112 if dostats:
2111 alldata['nbrevs'].append(
2113 alldata['nbrevs'].append(
2112 (data['nbrevs'], base.hex(), parent.hex(),)
2114 (data['nbrevs'], base.hex(), parent.hex(),)
2113 )
2115 )
2114 alldata['nbmissingfiles'].append(
2116 alldata['nbmissingfiles'].append(
2115 (data['nbmissingfiles'], base.hex(), parent.hex(),)
2117 (data['nbmissingfiles'], base.hex(), parent.hex(),)
2116 )
2118 )
2117 if dotiming:
2119 if dotiming:
2118 begin = util.timer()
2120 begin = util.timer()
2119 renames = copies.pathcopies(base, parent)
2121 renames = copies.pathcopies(base, parent)
2120 end = util.timer()
2122 end = util.timer()
2121 # not very stable timing since we did only one run
2123 # not very stable timing since we did only one run
2122 data['time'] = end - begin
2124 data['time'] = end - begin
2123 data['nbrenamedfiles'] = len(renames)
2125 data['nbrenamedfiles'] = len(renames)
2124 if dostats:
2126 if dostats:
2125 alldata['time'].append(
2127 alldata['time'].append(
2126 (data['time'], base.hex(), parent.hex(),)
2128 (data['time'], base.hex(), parent.hex(),)
2127 )
2129 )
2128 alldata['nbrenames'].append(
2130 alldata['nbrenames'].append(
2129 (data['nbrenamedfiles'], base.hex(), parent.hex(),)
2131 (data['nbrenamedfiles'], base.hex(), parent.hex(),)
2130 )
2132 )
2131 fm.startitem()
2133 fm.startitem()
2132 fm.data(**data)
2134 fm.data(**data)
2133 out = data.copy()
2135 out = data.copy()
2134 out['source'] = fm.hexfunc(base.node())
2136 out['source'] = fm.hexfunc(base.node())
2135 out['destination'] = fm.hexfunc(parent.node())
2137 out['destination'] = fm.hexfunc(parent.node())
2136 fm.plain(output % out)
2138 fm.plain(output % out)
2137
2139
2138 fm.end()
2140 fm.end()
2139 if dostats:
2141 if dostats:
2140 # use a second formatter because the data are quite different, not sure
2142 # use a second formatter because the data are quite different, not sure
2141 # how it flies with the templater.
2143 # how it flies with the templater.
2142 fm = ui.formatter(b'perf', opts)
2144 fm = ui.formatter(b'perf', opts)
2143 entries = [
2145 entries = [
2144 ('nbrevs', 'number of revision covered'),
2146 ('nbrevs', 'number of revision covered'),
2145 ('nbmissingfiles', 'number of missing files at head'),
2147 ('nbmissingfiles', 'number of missing files at head'),
2146 ]
2148 ]
2147 if dotiming:
2149 if dotiming:
2148 entries.append(('nbrenames', 'renamed files'))
2150 entries.append(('nbrenames', 'renamed files'))
2149 entries.append(('time', 'time'))
2151 entries.append(('time', 'time'))
2150 _displaystats(ui, opts, entries, alldata)
2152 _displaystats(ui, opts, entries, alldata)
2151
2153
2152
2154
2153 @command(b'perfcca', formatteropts)
2155 @command(b'perfcca', formatteropts)
2154 def perfcca(ui, repo, **opts):
2156 def perfcca(ui, repo, **opts):
2155 opts = _byteskwargs(opts)
2157 opts = _byteskwargs(opts)
2156 timer, fm = gettimer(ui, opts)
2158 timer, fm = gettimer(ui, opts)
2157 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2159 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2158 fm.end()
2160 fm.end()
2159
2161
2160
2162
2161 @command(b'perffncacheload', formatteropts)
2163 @command(b'perffncacheload', formatteropts)
2162 def perffncacheload(ui, repo, **opts):
2164 def perffncacheload(ui, repo, **opts):
2163 opts = _byteskwargs(opts)
2165 opts = _byteskwargs(opts)
2164 timer, fm = gettimer(ui, opts)
2166 timer, fm = gettimer(ui, opts)
2165 s = repo.store
2167 s = repo.store
2166
2168
2167 def d():
2169 def d():
2168 s.fncache._load()
2170 s.fncache._load()
2169
2171
2170 timer(d)
2172 timer(d)
2171 fm.end()
2173 fm.end()
2172
2174
2173
2175
2174 @command(b'perffncachewrite', formatteropts)
2176 @command(b'perffncachewrite', formatteropts)
2175 def perffncachewrite(ui, repo, **opts):
2177 def perffncachewrite(ui, repo, **opts):
2176 opts = _byteskwargs(opts)
2178 opts = _byteskwargs(opts)
2177 timer, fm = gettimer(ui, opts)
2179 timer, fm = gettimer(ui, opts)
2178 s = repo.store
2180 s = repo.store
2179 lock = repo.lock()
2181 lock = repo.lock()
2180 s.fncache._load()
2182 s.fncache._load()
2181 tr = repo.transaction(b'perffncachewrite')
2183 tr = repo.transaction(b'perffncachewrite')
2182 tr.addbackup(b'fncache')
2184 tr.addbackup(b'fncache')
2183
2185
2184 def d():
2186 def d():
2185 s.fncache._dirty = True
2187 s.fncache._dirty = True
2186 s.fncache.write(tr)
2188 s.fncache.write(tr)
2187
2189
2188 timer(d)
2190 timer(d)
2189 tr.close()
2191 tr.close()
2190 lock.release()
2192 lock.release()
2191 fm.end()
2193 fm.end()
2192
2194
2193
2195
2194 @command(b'perffncacheencode', formatteropts)
2196 @command(b'perffncacheencode', formatteropts)
2195 def perffncacheencode(ui, repo, **opts):
2197 def perffncacheencode(ui, repo, **opts):
2196 opts = _byteskwargs(opts)
2198 opts = _byteskwargs(opts)
2197 timer, fm = gettimer(ui, opts)
2199 timer, fm = gettimer(ui, opts)
2198 s = repo.store
2200 s = repo.store
2199 s.fncache._load()
2201 s.fncache._load()
2200
2202
2201 def d():
2203 def d():
2202 for p in s.fncache.entries:
2204 for p in s.fncache.entries:
2203 s.encode(p)
2205 s.encode(p)
2204
2206
2205 timer(d)
2207 timer(d)
2206 fm.end()
2208 fm.end()
2207
2209
2208
2210
2209 def _bdiffworker(q, blocks, xdiff, ready, done):
2211 def _bdiffworker(q, blocks, xdiff, ready, done):
2210 while not done.is_set():
2212 while not done.is_set():
2211 pair = q.get()
2213 pair = q.get()
2212 while pair is not None:
2214 while pair is not None:
2213 if xdiff:
2215 if xdiff:
2214 mdiff.bdiff.xdiffblocks(*pair)
2216 mdiff.bdiff.xdiffblocks(*pair)
2215 elif blocks:
2217 elif blocks:
2216 mdiff.bdiff.blocks(*pair)
2218 mdiff.bdiff.blocks(*pair)
2217 else:
2219 else:
2218 mdiff.textdiff(*pair)
2220 mdiff.textdiff(*pair)
2219 q.task_done()
2221 q.task_done()
2220 pair = q.get()
2222 pair = q.get()
2221 q.task_done() # for the None one
2223 q.task_done() # for the None one
2222 with ready:
2224 with ready:
2223 ready.wait()
2225 ready.wait()
2224
2226
2225
2227
2226 def _manifestrevision(repo, mnode):
2228 def _manifestrevision(repo, mnode):
2227 ml = repo.manifestlog
2229 ml = repo.manifestlog
2228
2230
2229 if util.safehasattr(ml, b'getstorage'):
2231 if util.safehasattr(ml, b'getstorage'):
2230 store = ml.getstorage(b'')
2232 store = ml.getstorage(b'')
2231 else:
2233 else:
2232 store = ml._revlog
2234 store = ml._revlog
2233
2235
2234 return store.revision(mnode)
2236 return store.revision(mnode)
2235
2237
2236
2238
2237 @command(
2239 @command(
2238 b'perfbdiff',
2240 b'perfbdiff',
2239 revlogopts
2241 revlogopts
2240 + formatteropts
2242 + formatteropts
2241 + [
2243 + [
2242 (
2244 (
2243 b'',
2245 b'',
2244 b'count',
2246 b'count',
2245 1,
2247 1,
2246 b'number of revisions to test (when using --startrev)',
2248 b'number of revisions to test (when using --startrev)',
2247 ),
2249 ),
2248 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2250 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2249 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2251 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2250 (b'', b'blocks', False, b'test computing diffs into blocks'),
2252 (b'', b'blocks', False, b'test computing diffs into blocks'),
2251 (b'', b'xdiff', False, b'use xdiff algorithm'),
2253 (b'', b'xdiff', False, b'use xdiff algorithm'),
2252 ],
2254 ],
2253 b'-c|-m|FILE REV',
2255 b'-c|-m|FILE REV',
2254 )
2256 )
2255 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2257 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2256 """benchmark a bdiff between revisions
2258 """benchmark a bdiff between revisions
2257
2259
2258 By default, benchmark a bdiff between its delta parent and itself.
2260 By default, benchmark a bdiff between its delta parent and itself.
2259
2261
2260 With ``--count``, benchmark bdiffs between delta parents and self for N
2262 With ``--count``, benchmark bdiffs between delta parents and self for N
2261 revisions starting at the specified revision.
2263 revisions starting at the specified revision.
2262
2264
2263 With ``--alldata``, assume the requested revision is a changeset and
2265 With ``--alldata``, assume the requested revision is a changeset and
2264 measure bdiffs for all changes related to that changeset (manifest
2266 measure bdiffs for all changes related to that changeset (manifest
2265 and filelogs).
2267 and filelogs).
2266 """
2268 """
2267 opts = _byteskwargs(opts)
2269 opts = _byteskwargs(opts)
2268
2270
2269 if opts[b'xdiff'] and not opts[b'blocks']:
2271 if opts[b'xdiff'] and not opts[b'blocks']:
2270 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2272 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2271
2273
2272 if opts[b'alldata']:
2274 if opts[b'alldata']:
2273 opts[b'changelog'] = True
2275 opts[b'changelog'] = True
2274
2276
2275 if opts.get(b'changelog') or opts.get(b'manifest'):
2277 if opts.get(b'changelog') or opts.get(b'manifest'):
2276 file_, rev = None, file_
2278 file_, rev = None, file_
2277 elif rev is None:
2279 elif rev is None:
2278 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2280 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2279
2281
2280 blocks = opts[b'blocks']
2282 blocks = opts[b'blocks']
2281 xdiff = opts[b'xdiff']
2283 xdiff = opts[b'xdiff']
2282 textpairs = []
2284 textpairs = []
2283
2285
2284 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2286 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2285
2287
2286 startrev = r.rev(r.lookup(rev))
2288 startrev = r.rev(r.lookup(rev))
2287 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2289 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2288 if opts[b'alldata']:
2290 if opts[b'alldata']:
2289 # Load revisions associated with changeset.
2291 # Load revisions associated with changeset.
2290 ctx = repo[rev]
2292 ctx = repo[rev]
2291 mtext = _manifestrevision(repo, ctx.manifestnode())
2293 mtext = _manifestrevision(repo, ctx.manifestnode())
2292 for pctx in ctx.parents():
2294 for pctx in ctx.parents():
2293 pman = _manifestrevision(repo, pctx.manifestnode())
2295 pman = _manifestrevision(repo, pctx.manifestnode())
2294 textpairs.append((pman, mtext))
2296 textpairs.append((pman, mtext))
2295
2297
2296 # Load filelog revisions by iterating manifest delta.
2298 # Load filelog revisions by iterating manifest delta.
2297 man = ctx.manifest()
2299 man = ctx.manifest()
2298 pman = ctx.p1().manifest()
2300 pman = ctx.p1().manifest()
2299 for filename, change in pman.diff(man).items():
2301 for filename, change in pman.diff(man).items():
2300 fctx = repo.file(filename)
2302 fctx = repo.file(filename)
2301 f1 = fctx.revision(change[0][0] or -1)
2303 f1 = fctx.revision(change[0][0] or -1)
2302 f2 = fctx.revision(change[1][0] or -1)
2304 f2 = fctx.revision(change[1][0] or -1)
2303 textpairs.append((f1, f2))
2305 textpairs.append((f1, f2))
2304 else:
2306 else:
2305 dp = r.deltaparent(rev)
2307 dp = r.deltaparent(rev)
2306 textpairs.append((r.revision(dp), r.revision(rev)))
2308 textpairs.append((r.revision(dp), r.revision(rev)))
2307
2309
2308 withthreads = threads > 0
2310 withthreads = threads > 0
2309 if not withthreads:
2311 if not withthreads:
2310
2312
2311 def d():
2313 def d():
2312 for pair in textpairs:
2314 for pair in textpairs:
2313 if xdiff:
2315 if xdiff:
2314 mdiff.bdiff.xdiffblocks(*pair)
2316 mdiff.bdiff.xdiffblocks(*pair)
2315 elif blocks:
2317 elif blocks:
2316 mdiff.bdiff.blocks(*pair)
2318 mdiff.bdiff.blocks(*pair)
2317 else:
2319 else:
2318 mdiff.textdiff(*pair)
2320 mdiff.textdiff(*pair)
2319
2321
2320 else:
2322 else:
2321 q = queue()
2323 q = queue()
2322 for i in _xrange(threads):
2324 for i in _xrange(threads):
2323 q.put(None)
2325 q.put(None)
2324 ready = threading.Condition()
2326 ready = threading.Condition()
2325 done = threading.Event()
2327 done = threading.Event()
2326 for i in _xrange(threads):
2328 for i in _xrange(threads):
2327 threading.Thread(
2329 threading.Thread(
2328 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2330 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2329 ).start()
2331 ).start()
2330 q.join()
2332 q.join()
2331
2333
2332 def d():
2334 def d():
2333 for pair in textpairs:
2335 for pair in textpairs:
2334 q.put(pair)
2336 q.put(pair)
2335 for i in _xrange(threads):
2337 for i in _xrange(threads):
2336 q.put(None)
2338 q.put(None)
2337 with ready:
2339 with ready:
2338 ready.notify_all()
2340 ready.notify_all()
2339 q.join()
2341 q.join()
2340
2342
2341 timer, fm = gettimer(ui, opts)
2343 timer, fm = gettimer(ui, opts)
2342 timer(d)
2344 timer(d)
2343 fm.end()
2345 fm.end()
2344
2346
2345 if withthreads:
2347 if withthreads:
2346 done.set()
2348 done.set()
2347 for i in _xrange(threads):
2349 for i in _xrange(threads):
2348 q.put(None)
2350 q.put(None)
2349 with ready:
2351 with ready:
2350 ready.notify_all()
2352 ready.notify_all()
2351
2353
2352
2354
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    # --alldata implies iterating the changelog
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # gather all (old, new) text pairs up front so only diffing is timed
    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                pairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))

    def dounidiff():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(dounidiff)
    fm.end()
2431
2433
2432
2434
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter diff flags mapped to their diffopts keyword names
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # expand each flag combination into keyword arguments for diff()
        opts = {options[c]: b'1' for c in diffopt}

        def rundiff():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % ((b'-' + diffopt) if diffopt else b'none')
        timer(rundiff, title=title)
    fm.end()
2456
2458
2457
2459
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    indexdata = opener.read(indexfile)

    # the version lives in the low 16 bits of the first 4-byte word;
    # bit 16 flags an inline revlog
    header = struct.unpack(b'>I', indexdata[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        parser = revlog.revlogio()
        isinline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # sample nodes at fixed fractions through the revlog
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parser.parseindex(indexdata, isinline)

    def getentry(revornode):
        index = parser.parseindex(indexdata, isinline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parser.parseindex(indexdata, isinline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = parser.parseindex(indexdata, isinline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = parser.parseindex(indexdata, isinline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2578
2580
2579
2581
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts backwards from the end
    if startrev < 0:
        startrev = rllen + startrev

    def readrevs():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            first, last = last - 1, first - 1
            step = -1 * step

        for revnum in _xrange(first, last, step):
            # Old revisions don't support passing int.
            node = rl.node(revnum)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(readrevs)
    fm.end()
2628
2630
2629
2631
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count backwards from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: message previously had a typo ("invalide run count")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults is a list of runs; each run is a list of (rev, timing).
    # Regroup into one (rev, [timing-per-run]) entry per revision.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the 50th percentile was previously computed as
        # ``resultcount * 70 // 100``, reporting the 70% entry as "50%"
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2771
2773
2772
2774
2773 class _faketr(object):
2775 class _faketr(object):
2774 def add(s, x, y, z=None):
2776 def add(s, x, y, z=None):
2775 return None
2777 return None
2776
2778
2777
2779
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions ``startrev``..``stoprev`` of ``orig`` into a
    throwaway copy of the revlog, timing each ``addrawrevision`` call.

    Returns a list of ``(rev, timing)`` pairs, one per revision added.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # measure cold-cache behavior for every revision
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as tres:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, tres[0]))
        updateprogress(total)
        completeprogress()
    return timings
2827
2829
2828
2830
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair to feed ``addrawrevision`` for
    revision ``rev`` of ``orig``.

    ``source`` selects how the content is supplied: as a full text, as a
    delta against one of the parents, or as the delta already stored.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p2 if p2 != nullid else p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent, diff = p1, p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # ties go to p1
            if len(p1diff) > len(p2diff):
                parent, diff = p2, p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2869
2871
2870
2872
2871 @contextlib.contextmanager
2873 @contextlib.contextmanager
2872 def _temprevlog(ui, orig, truncaterev):
2874 def _temprevlog(ui, orig, truncaterev):
2873 from mercurial import vfs as vfsmod
2875 from mercurial import vfs as vfsmod
2874
2876
2875 if orig._inline:
2877 if orig._inline:
2876 raise error.Abort('not supporting inline revlog (yet)')
2878 raise error.Abort('not supporting inline revlog (yet)')
2877 revlogkwargs = {}
2879 revlogkwargs = {}
2878 k = 'upperboundcomp'
2880 k = 'upperboundcomp'
2879 if util.safehasattr(orig, k):
2881 if util.safehasattr(orig, k):
2880 revlogkwargs[k] = getattr(orig, k)
2882 revlogkwargs[k] = getattr(orig, k)
2881
2883
2882 origindexpath = orig.opener.join(orig.indexfile)
2884 origindexpath = orig.opener.join(orig.indexfile)
2883 origdatapath = orig.opener.join(orig.datafile)
2885 origdatapath = orig.opener.join(orig.datafile)
2884 indexname = 'revlog.i'
2886 indexname = 'revlog.i'
2885 dataname = 'revlog.d'
2887 dataname = 'revlog.d'
2886
2888
2887 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2889 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2888 try:
2890 try:
2889 # copy the data file in a temporary directory
2891 # copy the data file in a temporary directory
2890 ui.debug('copying data in %s\n' % tmpdir)
2892 ui.debug('copying data in %s\n' % tmpdir)
2891 destindexpath = os.path.join(tmpdir, 'revlog.i')
2893 destindexpath = os.path.join(tmpdir, 'revlog.i')
2892 destdatapath = os.path.join(tmpdir, 'revlog.d')
2894 destdatapath = os.path.join(tmpdir, 'revlog.d')
2893 shutil.copyfile(origindexpath, destindexpath)
2895 shutil.copyfile(origindexpath, destindexpath)
2894 shutil.copyfile(origdatapath, destdatapath)
2896 shutil.copyfile(origdatapath, destdatapath)
2895
2897
2896 # remove the data we want to add again
2898 # remove the data we want to add again
2897 ui.debug('truncating data to be rewritten\n')
2899 ui.debug('truncating data to be rewritten\n')
2898 with open(destindexpath, 'ab') as index:
2900 with open(destindexpath, 'ab') as index:
2899 index.seek(0)
2901 index.seek(0)
2900 index.truncate(truncaterev * orig._io.size)
2902 index.truncate(truncaterev * orig._io.size)
2901 with open(destdatapath, 'ab') as data:
2903 with open(destdatapath, 'ab') as data:
2902 data.seek(0)
2904 data.seek(0)
2903 data.truncate(orig.start(truncaterev))
2905 data.truncate(orig.start(truncaterev))
2904
2906
2905 # instantiate a new revlog from the temporary copy
2907 # instantiate a new revlog from the temporary copy
2906 ui.debug('truncating adding to be rewritten\n')
2908 ui.debug('truncating adding to be rewritten\n')
2907 vfs = vfsmod.vfs(tmpdir)
2909 vfs = vfsmod.vfs(tmpdir)
2908 vfs.options = getattr(orig.opener, 'options', None)
2910 vfs.options = getattr(orig.opener, 'options', None)
2909
2911
2910 dest = revlog.revlog(
2912 dest = revlog.revlog(
2911 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
2913 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
2912 )
2914 )
2913 if dest._inline:
2915 if dest._inline:
2914 raise error.Abort('not supporting inline revlog (yet)')
2916 raise error.Abort('not supporting inline revlog (yet)')
2915 # make sure internals are initialized
2917 # make sure internals are initialized
2916 dest.revision(len(dest) - 1)
2918 dest.revision(len(dest) - 1)
2917 yield dest
2919 yield dest
2918 del dest, vfs
2920 del dest, vfs
2919 finally:
2921 finally:
2920 shutil.rmtree(tmpdir, True)
2922 shutil.rmtree(tmpdir, True)
2921
2923
2922
2924
2923 @command(
2925 @command(
2924 b'perfrevlogchunks',
2926 b'perfrevlogchunks',
2925 revlogopts
2927 revlogopts
2926 + formatteropts
2928 + formatteropts
2927 + [
2929 + [
2928 (b'e', b'engines', b'', b'compression engines to use'),
2930 (b'e', b'engines', b'', b'compression engines to use'),
2929 (b's', b'startrev', 0, b'revision to start at'),
2931 (b's', b'startrev', 0, b'revision to start at'),
2930 ],
2932 ],
2931 b'-c|-m|FILE',
2933 b'-c|-m|FILE',
2932 )
2934 )
2933 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2935 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2934 """Benchmark operations on revlog chunks.
2936 """Benchmark operations on revlog chunks.
2935
2937
2936 Logically, each revlog is a collection of fulltext revisions. However,
2938 Logically, each revlog is a collection of fulltext revisions. However,
2937 stored within each revlog are "chunks" of possibly compressed data. This
2939 stored within each revlog are "chunks" of possibly compressed data. This
2938 data needs to be read and decompressed or compressed and written.
2940 data needs to be read and decompressed or compressed and written.
2939
2941
2940 This command measures the time it takes to read+decompress and recompress
2942 This command measures the time it takes to read+decompress and recompress
2941 chunks in a revlog. It effectively isolates I/O and compression performance.
2943 chunks in a revlog. It effectively isolates I/O and compression performance.
2942 For measurements of higher-level operations like resolving revisions,
2944 For measurements of higher-level operations like resolving revisions,
2943 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2945 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2944 """
2946 """
2945 opts = _byteskwargs(opts)
2947 opts = _byteskwargs(opts)
2946
2948
2947 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2949 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2948
2950
2949 # _chunkraw was renamed to _getsegmentforrevs.
2951 # _chunkraw was renamed to _getsegmentforrevs.
2950 try:
2952 try:
2951 segmentforrevs = rl._getsegmentforrevs
2953 segmentforrevs = rl._getsegmentforrevs
2952 except AttributeError:
2954 except AttributeError:
2953 segmentforrevs = rl._chunkraw
2955 segmentforrevs = rl._chunkraw
2954
2956
2955 # Verify engines argument.
2957 # Verify engines argument.
2956 if engines:
2958 if engines:
2957 engines = set(e.strip() for e in engines.split(b','))
2959 engines = set(e.strip() for e in engines.split(b','))
2958 for engine in engines:
2960 for engine in engines:
2959 try:
2961 try:
2960 util.compressionengines[engine]
2962 util.compressionengines[engine]
2961 except KeyError:
2963 except KeyError:
2962 raise error.Abort(b'unknown compression engine: %s' % engine)
2964 raise error.Abort(b'unknown compression engine: %s' % engine)
2963 else:
2965 else:
2964 engines = []
2966 engines = []
2965 for e in util.compengines:
2967 for e in util.compengines:
2966 engine = util.compengines[e]
2968 engine = util.compengines[e]
2967 try:
2969 try:
2968 if engine.available():
2970 if engine.available():
2969 engine.revlogcompressor().compress(b'dummy')
2971 engine.revlogcompressor().compress(b'dummy')
2970 engines.append(e)
2972 engines.append(e)
2971 except NotImplementedError:
2973 except NotImplementedError:
2972 pass
2974 pass
2973
2975
2974 revs = list(rl.revs(startrev, len(rl) - 1))
2976 revs = list(rl.revs(startrev, len(rl) - 1))
2975
2977
2976 def rlfh(rl):
2978 def rlfh(rl):
2977 if rl._inline:
2979 if rl._inline:
2978 return getsvfs(repo)(rl.indexfile)
2980 return getsvfs(repo)(rl.indexfile)
2979 else:
2981 else:
2980 return getsvfs(repo)(rl.datafile)
2982 return getsvfs(repo)(rl.datafile)
2981
2983
2982 def doread():
2984 def doread():
2983 rl.clearcaches()
2985 rl.clearcaches()
2984 for rev in revs:
2986 for rev in revs:
2985 segmentforrevs(rev, rev)
2987 segmentforrevs(rev, rev)
2986
2988
2987 def doreadcachedfh():
2989 def doreadcachedfh():
2988 rl.clearcaches()
2990 rl.clearcaches()
2989 fh = rlfh(rl)
2991 fh = rlfh(rl)
2990 for rev in revs:
2992 for rev in revs:
2991 segmentforrevs(rev, rev, df=fh)
2993 segmentforrevs(rev, rev, df=fh)
2992
2994
2993 def doreadbatch():
2995 def doreadbatch():
2994 rl.clearcaches()
2996 rl.clearcaches()
2995 segmentforrevs(revs[0], revs[-1])
2997 segmentforrevs(revs[0], revs[-1])
2996
2998
2997 def doreadbatchcachedfh():
2999 def doreadbatchcachedfh():
2998 rl.clearcaches()
3000 rl.clearcaches()
2999 fh = rlfh(rl)
3001 fh = rlfh(rl)
3000 segmentforrevs(revs[0], revs[-1], df=fh)
3002 segmentforrevs(revs[0], revs[-1], df=fh)
3001
3003
3002 def dochunk():
3004 def dochunk():
3003 rl.clearcaches()
3005 rl.clearcaches()
3004 fh = rlfh(rl)
3006 fh = rlfh(rl)
3005 for rev in revs:
3007 for rev in revs:
3006 rl._chunk(rev, df=fh)
3008 rl._chunk(rev, df=fh)
3007
3009
3008 chunks = [None]
3010 chunks = [None]
3009
3011
3010 def dochunkbatch():
3012 def dochunkbatch():
3011 rl.clearcaches()
3013 rl.clearcaches()
3012 fh = rlfh(rl)
3014 fh = rlfh(rl)
3013 # Save chunks as a side-effect.
3015 # Save chunks as a side-effect.
3014 chunks[0] = rl._chunks(revs, df=fh)
3016 chunks[0] = rl._chunks(revs, df=fh)
3015
3017
3016 def docompress(compressor):
3018 def docompress(compressor):
3017 rl.clearcaches()
3019 rl.clearcaches()
3018
3020
3019 try:
3021 try:
3020 # Swap in the requested compression engine.
3022 # Swap in the requested compression engine.
3021 oldcompressor = rl._compressor
3023 oldcompressor = rl._compressor
3022 rl._compressor = compressor
3024 rl._compressor = compressor
3023 for chunk in chunks[0]:
3025 for chunk in chunks[0]:
3024 rl.compress(chunk)
3026 rl.compress(chunk)
3025 finally:
3027 finally:
3026 rl._compressor = oldcompressor
3028 rl._compressor = oldcompressor
3027
3029
3028 benches = [
3030 benches = [
3029 (lambda: doread(), b'read'),
3031 (lambda: doread(), b'read'),
3030 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3032 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3031 (lambda: doreadbatch(), b'read batch'),
3033 (lambda: doreadbatch(), b'read batch'),
3032 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3034 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3033 (lambda: dochunk(), b'chunk'),
3035 (lambda: dochunk(), b'chunk'),
3034 (lambda: dochunkbatch(), b'chunk batch'),
3036 (lambda: dochunkbatch(), b'chunk batch'),
3035 ]
3037 ]
3036
3038
3037 for engine in sorted(engines):
3039 for engine in sorted(engines):
3038 compressor = util.compengines[engine].revlogcompressor()
3040 compressor = util.compengines[engine].revlogcompressor()
3039 benches.append(
3041 benches.append(
3040 (
3042 (
3041 functools.partial(docompress, compressor),
3043 functools.partial(docompress, compressor),
3042 b'compress w/ %s' % engine,
3044 b'compress w/ %s' % engine,
3043 )
3045 )
3044 )
3046 )
3045
3047
3046 for fn, title in benches:
3048 for fn, title in benches:
3047 timer, fm = gettimer(ui, opts)
3049 timer, fm = gettimer(ui, opts)
3048 timer(fn, title=title)
3050 timer(fn, title=title)
3049 fm.end()
3051 fm.end()
3050
3052
3051
3053
3052 @command(
3054 @command(
3053 b'perfrevlogrevision',
3055 b'perfrevlogrevision',
3054 revlogopts
3056 revlogopts
3055 + formatteropts
3057 + formatteropts
3056 + [(b'', b'cache', False, b'use caches instead of clearing')],
3058 + [(b'', b'cache', False, b'use caches instead of clearing')],
3057 b'-c|-m|FILE REV',
3059 b'-c|-m|FILE REV',
3058 )
3060 )
3059 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3061 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3060 """Benchmark obtaining a revlog revision.
3062 """Benchmark obtaining a revlog revision.
3061
3063
3062 Obtaining a revlog revision consists of roughly the following steps:
3064 Obtaining a revlog revision consists of roughly the following steps:
3063
3065
3064 1. Compute the delta chain
3066 1. Compute the delta chain
3065 2. Slice the delta chain if applicable
3067 2. Slice the delta chain if applicable
3066 3. Obtain the raw chunks for that delta chain
3068 3. Obtain the raw chunks for that delta chain
3067 4. Decompress each raw chunk
3069 4. Decompress each raw chunk
3068 5. Apply binary patches to obtain fulltext
3070 5. Apply binary patches to obtain fulltext
3069 6. Verify hash of fulltext
3071 6. Verify hash of fulltext
3070
3072
3071 This command measures the time spent in each of these phases.
3073 This command measures the time spent in each of these phases.
3072 """
3074 """
3073 opts = _byteskwargs(opts)
3075 opts = _byteskwargs(opts)
3074
3076
3075 if opts.get(b'changelog') or opts.get(b'manifest'):
3077 if opts.get(b'changelog') or opts.get(b'manifest'):
3076 file_, rev = None, file_
3078 file_, rev = None, file_
3077 elif rev is None:
3079 elif rev is None:
3078 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3080 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3079
3081
3080 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3082 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3081
3083
3082 # _chunkraw was renamed to _getsegmentforrevs.
3084 # _chunkraw was renamed to _getsegmentforrevs.
3083 try:
3085 try:
3084 segmentforrevs = r._getsegmentforrevs
3086 segmentforrevs = r._getsegmentforrevs
3085 except AttributeError:
3087 except AttributeError:
3086 segmentforrevs = r._chunkraw
3088 segmentforrevs = r._chunkraw
3087
3089
3088 node = r.lookup(rev)
3090 node = r.lookup(rev)
3089 rev = r.rev(node)
3091 rev = r.rev(node)
3090
3092
3091 def getrawchunks(data, chain):
3093 def getrawchunks(data, chain):
3092 start = r.start
3094 start = r.start
3093 length = r.length
3095 length = r.length
3094 inline = r._inline
3096 inline = r._inline
3095 iosize = r._io.size
3097 iosize = r._io.size
3096 buffer = util.buffer
3098 buffer = util.buffer
3097
3099
3098 chunks = []
3100 chunks = []
3099 ladd = chunks.append
3101 ladd = chunks.append
3100 for idx, item in enumerate(chain):
3102 for idx, item in enumerate(chain):
3101 offset = start(item[0])
3103 offset = start(item[0])
3102 bits = data[idx]
3104 bits = data[idx]
3103 for rev in item:
3105 for rev in item:
3104 chunkstart = start(rev)
3106 chunkstart = start(rev)
3105 if inline:
3107 if inline:
3106 chunkstart += (rev + 1) * iosize
3108 chunkstart += (rev + 1) * iosize
3107 chunklength = length(rev)
3109 chunklength = length(rev)
3108 ladd(buffer(bits, chunkstart - offset, chunklength))
3110 ladd(buffer(bits, chunkstart - offset, chunklength))
3109
3111
3110 return chunks
3112 return chunks
3111
3113
3112 def dodeltachain(rev):
3114 def dodeltachain(rev):
3113 if not cache:
3115 if not cache:
3114 r.clearcaches()
3116 r.clearcaches()
3115 r._deltachain(rev)
3117 r._deltachain(rev)
3116
3118
3117 def doread(chain):
3119 def doread(chain):
3118 if not cache:
3120 if not cache:
3119 r.clearcaches()
3121 r.clearcaches()
3120 for item in slicedchain:
3122 for item in slicedchain:
3121 segmentforrevs(item[0], item[-1])
3123 segmentforrevs(item[0], item[-1])
3122
3124
3123 def doslice(r, chain, size):
3125 def doslice(r, chain, size):
3124 for s in slicechunk(r, chain, targetsize=size):
3126 for s in slicechunk(r, chain, targetsize=size):
3125 pass
3127 pass
3126
3128
3127 def dorawchunks(data, chain):
3129 def dorawchunks(data, chain):
3128 if not cache:
3130 if not cache:
3129 r.clearcaches()
3131 r.clearcaches()
3130 getrawchunks(data, chain)
3132 getrawchunks(data, chain)
3131
3133
3132 def dodecompress(chunks):
3134 def dodecompress(chunks):
3133 decomp = r.decompress
3135 decomp = r.decompress
3134 for chunk in chunks:
3136 for chunk in chunks:
3135 decomp(chunk)
3137 decomp(chunk)
3136
3138
3137 def dopatch(text, bins):
3139 def dopatch(text, bins):
3138 if not cache:
3140 if not cache:
3139 r.clearcaches()
3141 r.clearcaches()
3140 mdiff.patches(text, bins)
3142 mdiff.patches(text, bins)
3141
3143
3142 def dohash(text):
3144 def dohash(text):
3143 if not cache:
3145 if not cache:
3144 r.clearcaches()
3146 r.clearcaches()
3145 r.checkhash(text, node, rev=rev)
3147 r.checkhash(text, node, rev=rev)
3146
3148
3147 def dorevision():
3149 def dorevision():
3148 if not cache:
3150 if not cache:
3149 r.clearcaches()
3151 r.clearcaches()
3150 r.revision(node)
3152 r.revision(node)
3151
3153
3152 try:
3154 try:
3153 from mercurial.revlogutils.deltas import slicechunk
3155 from mercurial.revlogutils.deltas import slicechunk
3154 except ImportError:
3156 except ImportError:
3155 slicechunk = getattr(revlog, '_slicechunk', None)
3157 slicechunk = getattr(revlog, '_slicechunk', None)
3156
3158
3157 size = r.length(rev)
3159 size = r.length(rev)
3158 chain = r._deltachain(rev)[0]
3160 chain = r._deltachain(rev)[0]
3159 if not getattr(r, '_withsparseread', False):
3161 if not getattr(r, '_withsparseread', False):
3160 slicedchain = (chain,)
3162 slicedchain = (chain,)
3161 else:
3163 else:
3162 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3164 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3163 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3165 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3164 rawchunks = getrawchunks(data, slicedchain)
3166 rawchunks = getrawchunks(data, slicedchain)
3165 bins = r._chunks(chain)
3167 bins = r._chunks(chain)
3166 text = bytes(bins[0])
3168 text = bytes(bins[0])
3167 bins = bins[1:]
3169 bins = bins[1:]
3168 text = mdiff.patches(text, bins)
3170 text = mdiff.patches(text, bins)
3169
3171
3170 benches = [
3172 benches = [
3171 (lambda: dorevision(), b'full'),
3173 (lambda: dorevision(), b'full'),
3172 (lambda: dodeltachain(rev), b'deltachain'),
3174 (lambda: dodeltachain(rev), b'deltachain'),
3173 (lambda: doread(chain), b'read'),
3175 (lambda: doread(chain), b'read'),
3174 ]
3176 ]
3175
3177
3176 if getattr(r, '_withsparseread', False):
3178 if getattr(r, '_withsparseread', False):
3177 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3179 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3178 benches.append(slicing)
3180 benches.append(slicing)
3179
3181
3180 benches.extend(
3182 benches.extend(
3181 [
3183 [
3182 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3184 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3183 (lambda: dodecompress(rawchunks), b'decompress'),
3185 (lambda: dodecompress(rawchunks), b'decompress'),
3184 (lambda: dopatch(text, bins), b'patch'),
3186 (lambda: dopatch(text, bins), b'patch'),
3185 (lambda: dohash(text), b'hash'),
3187 (lambda: dohash(text), b'hash'),
3186 ]
3188 ]
3187 )
3189 )
3188
3190
3189 timer, fm = gettimer(ui, opts)
3191 timer, fm = gettimer(ui, opts)
3190 for fn, title in benches:
3192 for fn, title in benches:
3191 timer(fn, title=title)
3193 timer(fn, title=title)
3192 fm.end()
3194 fm.end()
3193
3195
3194
3196
3195 @command(
3197 @command(
3196 b'perfrevset',
3198 b'perfrevset',
3197 [
3199 [
3198 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3200 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3199 (b'', b'contexts', False, b'obtain changectx for each revision'),
3201 (b'', b'contexts', False, b'obtain changectx for each revision'),
3200 ]
3202 ]
3201 + formatteropts,
3203 + formatteropts,
3202 b"REVSET",
3204 b"REVSET",
3203 )
3205 )
3204 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3206 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3205 """benchmark the execution time of a revset
3207 """benchmark the execution time of a revset
3206
3208
3207 Use the --clean option if need to evaluate the impact of build volatile
3209 Use the --clean option if need to evaluate the impact of build volatile
3208 revisions set cache on the revset execution. Volatile cache hold filtered
3210 revisions set cache on the revset execution. Volatile cache hold filtered
3209 and obsolete related cache."""
3211 and obsolete related cache."""
3210 opts = _byteskwargs(opts)
3212 opts = _byteskwargs(opts)
3211
3213
3212 timer, fm = gettimer(ui, opts)
3214 timer, fm = gettimer(ui, opts)
3213
3215
3214 def d():
3216 def d():
3215 if clear:
3217 if clear:
3216 repo.invalidatevolatilesets()
3218 repo.invalidatevolatilesets()
3217 if contexts:
3219 if contexts:
3218 for ctx in repo.set(expr):
3220 for ctx in repo.set(expr):
3219 pass
3221 pass
3220 else:
3222 else:
3221 for r in repo.revs(expr):
3223 for r in repo.revs(expr):
3222 pass
3224 pass
3223
3225
3224 timer(d)
3226 timer(d)
3225 fm.end()
3227 fm.end()
3226
3228
3227
3229
3228 @command(
3230 @command(
3229 b'perfvolatilesets',
3231 b'perfvolatilesets',
3230 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3232 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
3231 + formatteropts,
3233 + formatteropts,
3232 )
3234 )
3233 def perfvolatilesets(ui, repo, *names, **opts):
3235 def perfvolatilesets(ui, repo, *names, **opts):
3234 """benchmark the computation of various volatile set
3236 """benchmark the computation of various volatile set
3235
3237
3236 Volatile set computes element related to filtering and obsolescence."""
3238 Volatile set computes element related to filtering and obsolescence."""
3237 opts = _byteskwargs(opts)
3239 opts = _byteskwargs(opts)
3238 timer, fm = gettimer(ui, opts)
3240 timer, fm = gettimer(ui, opts)
3239 repo = repo.unfiltered()
3241 repo = repo.unfiltered()
3240
3242
3241 def getobs(name):
3243 def getobs(name):
3242 def d():
3244 def d():
3243 repo.invalidatevolatilesets()
3245 repo.invalidatevolatilesets()
3244 if opts[b'clear_obsstore']:
3246 if opts[b'clear_obsstore']:
3245 clearfilecache(repo, b'obsstore')
3247 clearfilecache(repo, b'obsstore')
3246 obsolete.getrevs(repo, name)
3248 obsolete.getrevs(repo, name)
3247
3249
3248 return d
3250 return d
3249
3251
3250 allobs = sorted(obsolete.cachefuncs)
3252 allobs = sorted(obsolete.cachefuncs)
3251 if names:
3253 if names:
3252 allobs = [n for n in allobs if n in names]
3254 allobs = [n for n in allobs if n in names]
3253
3255
3254 for name in allobs:
3256 for name in allobs:
3255 timer(getobs(name), title=name)
3257 timer(getobs(name), title=name)
3256
3258
3257 def getfiltered(name):
3259 def getfiltered(name):
3258 def d():
3260 def d():
3259 repo.invalidatevolatilesets()
3261 repo.invalidatevolatilesets()
3260 if opts[b'clear_obsstore']:
3262 if opts[b'clear_obsstore']:
3261 clearfilecache(repo, b'obsstore')
3263 clearfilecache(repo, b'obsstore')
3262 repoview.filterrevs(repo, name)
3264 repoview.filterrevs(repo, name)
3263
3265
3264 return d
3266 return d
3265
3267
3266 allfilter = sorted(repoview.filtertable)
3268 allfilter = sorted(repoview.filtertable)
3267 if names:
3269 if names:
3268 allfilter = [n for n in allfilter if n in names]
3270 allfilter = [n for n in allfilter if n in names]
3269
3271
3270 for name in allfilter:
3272 for name in allfilter:
3271 timer(getfiltered(name), title=name)
3273 timer(getfiltered(name), title=name)
3272 fm.end()
3274 fm.end()
3273
3275
3274
3276
3275 @command(
3277 @command(
3276 b'perfbranchmap',
3278 b'perfbranchmap',
3277 [
3279 [
3278 (b'f', b'full', False, b'Includes build time of subset'),
3280 (b'f', b'full', False, b'Includes build time of subset'),
3279 (
3281 (
3280 b'',
3282 b'',
3281 b'clear-revbranch',
3283 b'clear-revbranch',
3282 False,
3284 False,
3283 b'purge the revbranch cache between computation',
3285 b'purge the revbranch cache between computation',
3284 ),
3286 ),
3285 ]
3287 ]
3286 + formatteropts,
3288 + formatteropts,
3287 )
3289 )
3288 def perfbranchmap(ui, repo, *filternames, **opts):
3290 def perfbranchmap(ui, repo, *filternames, **opts):
3289 """benchmark the update of a branchmap
3291 """benchmark the update of a branchmap
3290
3292
3291 This benchmarks the full repo.branchmap() call with read and write disabled
3293 This benchmarks the full repo.branchmap() call with read and write disabled
3292 """
3294 """
3293 opts = _byteskwargs(opts)
3295 opts = _byteskwargs(opts)
3294 full = opts.get(b"full", False)
3296 full = opts.get(b"full", False)
3295 clear_revbranch = opts.get(b"clear_revbranch", False)
3297 clear_revbranch = opts.get(b"clear_revbranch", False)
3296 timer, fm = gettimer(ui, opts)
3298 timer, fm = gettimer(ui, opts)
3297
3299
3298 def getbranchmap(filtername):
3300 def getbranchmap(filtername):
3299 """generate a benchmark function for the filtername"""
3301 """generate a benchmark function for the filtername"""
3300 if filtername is None:
3302 if filtername is None:
3301 view = repo
3303 view = repo
3302 else:
3304 else:
3303 view = repo.filtered(filtername)
3305 view = repo.filtered(filtername)
3304 if util.safehasattr(view._branchcaches, '_per_filter'):
3306 if util.safehasattr(view._branchcaches, '_per_filter'):
3305 filtered = view._branchcaches._per_filter
3307 filtered = view._branchcaches._per_filter
3306 else:
3308 else:
3307 # older versions
3309 # older versions
3308 filtered = view._branchcaches
3310 filtered = view._branchcaches
3309
3311
3310 def d():
3312 def d():
3311 if clear_revbranch:
3313 if clear_revbranch:
3312 repo.revbranchcache()._clear()
3314 repo.revbranchcache()._clear()
3313 if full:
3315 if full:
3314 view._branchcaches.clear()
3316 view._branchcaches.clear()
3315 else:
3317 else:
3316 filtered.pop(filtername, None)
3318 filtered.pop(filtername, None)
3317 view.branchmap()
3319 view.branchmap()
3318
3320
3319 return d
3321 return d
3320
3322
3321 # add filter in smaller subset to bigger subset
3323 # add filter in smaller subset to bigger subset
3322 possiblefilters = set(repoview.filtertable)
3324 possiblefilters = set(repoview.filtertable)
3323 if filternames:
3325 if filternames:
3324 possiblefilters &= set(filternames)
3326 possiblefilters &= set(filternames)
3325 subsettable = getbranchmapsubsettable()
3327 subsettable = getbranchmapsubsettable()
3326 allfilters = []
3328 allfilters = []
3327 while possiblefilters:
3329 while possiblefilters:
3328 for name in possiblefilters:
3330 for name in possiblefilters:
3329 subset = subsettable.get(name)
3331 subset = subsettable.get(name)
3330 if subset not in possiblefilters:
3332 if subset not in possiblefilters:
3331 break
3333 break
3332 else:
3334 else:
3333 assert False, b'subset cycle %s!' % possiblefilters
3335 assert False, b'subset cycle %s!' % possiblefilters
3334 allfilters.append(name)
3336 allfilters.append(name)
3335 possiblefilters.remove(name)
3337 possiblefilters.remove(name)
3336
3338
3337 # warm the cache
3339 # warm the cache
3338 if not full:
3340 if not full:
3339 for name in allfilters:
3341 for name in allfilters:
3340 repo.filtered(name).branchmap()
3342 repo.filtered(name).branchmap()
3341 if not filternames or b'unfiltered' in filternames:
3343 if not filternames or b'unfiltered' in filternames:
3342 # add unfiltered
3344 # add unfiltered
3343 allfilters.append(None)
3345 allfilters.append(None)
3344
3346
3345 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3347 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3346 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3348 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3347 branchcacheread.set(classmethod(lambda *args: None))
3349 branchcacheread.set(classmethod(lambda *args: None))
3348 else:
3350 else:
3349 # older versions
3351 # older versions
3350 branchcacheread = safeattrsetter(branchmap, b'read')
3352 branchcacheread = safeattrsetter(branchmap, b'read')
3351 branchcacheread.set(lambda *args: None)
3353 branchcacheread.set(lambda *args: None)
3352 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3354 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3353 branchcachewrite.set(lambda *args: None)
3355 branchcachewrite.set(lambda *args: None)
3354 try:
3356 try:
3355 for name in allfilters:
3357 for name in allfilters:
3356 printname = name
3358 printname = name
3357 if name is None:
3359 if name is None:
3358 printname = b'unfiltered'
3360 printname = b'unfiltered'
3359 timer(getbranchmap(name), title=str(printname))
3361 timer(getbranchmap(name), title=str(printname))
3360 finally:
3362 finally:
3361 branchcacheread.restore()
3363 branchcacheread.restore()
3362 branchcachewrite.restore()
3364 branchcachewrite.restore()
3363 fm.end()
3365 fm.end()
3364
3366
3365
3367
3366 @command(
3368 @command(
3367 b'perfbranchmapupdate',
3369 b'perfbranchmapupdate',
3368 [
3370 [
3369 (b'', b'base', [], b'subset of revision to start from'),
3371 (b'', b'base', [], b'subset of revision to start from'),
3370 (b'', b'target', [], b'subset of revision to end with'),
3372 (b'', b'target', [], b'subset of revision to end with'),
3371 (b'', b'clear-caches', False, b'clear cache between each runs'),
3373 (b'', b'clear-caches', False, b'clear cache between each runs'),
3372 ]
3374 ]
3373 + formatteropts,
3375 + formatteropts,
3374 )
3376 )
3375 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3377 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3376 """benchmark branchmap update from for <base> revs to <target> revs
3378 """benchmark branchmap update from for <base> revs to <target> revs
3377
3379
3378 If `--clear-caches` is passed, the following items will be reset before
3380 If `--clear-caches` is passed, the following items will be reset before
3379 each update:
3381 each update:
3380 * the changelog instance and associated indexes
3382 * the changelog instance and associated indexes
3381 * the rev-branch-cache instance
3383 * the rev-branch-cache instance
3382
3384
3383 Examples:
3385 Examples:
3384
3386
3385 # update for the one last revision
3387 # update for the one last revision
3386 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3388 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3387
3389
3388 $ update for change coming with a new branch
3390 $ update for change coming with a new branch
3389 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3391 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3390 """
3392 """
3391 from mercurial import branchmap
3393 from mercurial import branchmap
3392 from mercurial import repoview
3394 from mercurial import repoview
3393
3395
3394 opts = _byteskwargs(opts)
3396 opts = _byteskwargs(opts)
3395 timer, fm = gettimer(ui, opts)
3397 timer, fm = gettimer(ui, opts)
3396 clearcaches = opts[b'clear_caches']
3398 clearcaches = opts[b'clear_caches']
3397 unfi = repo.unfiltered()
3399 unfi = repo.unfiltered()
3398 x = [None] # used to pass data between closure
3400 x = [None] # used to pass data between closure
3399
3401
3400 # we use a `list` here to avoid possible side effect from smartset
3402 # we use a `list` here to avoid possible side effect from smartset
3401 baserevs = list(scmutil.revrange(repo, base))
3403 baserevs = list(scmutil.revrange(repo, base))
3402 targetrevs = list(scmutil.revrange(repo, target))
3404 targetrevs = list(scmutil.revrange(repo, target))
3403 if not baserevs:
3405 if not baserevs:
3404 raise error.Abort(b'no revisions selected for --base')
3406 raise error.Abort(b'no revisions selected for --base')
3405 if not targetrevs:
3407 if not targetrevs:
3406 raise error.Abort(b'no revisions selected for --target')
3408 raise error.Abort(b'no revisions selected for --target')
3407
3409
3408 # make sure the target branchmap also contains the one in the base
3410 # make sure the target branchmap also contains the one in the base
3409 targetrevs = list(set(baserevs) | set(targetrevs))
3411 targetrevs = list(set(baserevs) | set(targetrevs))
3410 targetrevs.sort()
3412 targetrevs.sort()
3411
3413
3412 cl = repo.changelog
3414 cl = repo.changelog
3413 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3415 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3414 allbaserevs.sort()
3416 allbaserevs.sort()
3415 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3417 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3416
3418
3417 newrevs = list(alltargetrevs.difference(allbaserevs))
3419 newrevs = list(alltargetrevs.difference(allbaserevs))
3418 newrevs.sort()
3420 newrevs.sort()
3419
3421
3420 allrevs = frozenset(unfi.changelog.revs())
3422 allrevs = frozenset(unfi.changelog.revs())
3421 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3423 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3422 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3424 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3423
3425
3424 def basefilter(repo, visibilityexceptions=None):
3426 def basefilter(repo, visibilityexceptions=None):
3425 return basefilterrevs
3427 return basefilterrevs
3426
3428
3427 def targetfilter(repo, visibilityexceptions=None):
3429 def targetfilter(repo, visibilityexceptions=None):
3428 return targetfilterrevs
3430 return targetfilterrevs
3429
3431
3430 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3432 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3431 ui.status(msg % (len(allbaserevs), len(newrevs)))
3433 ui.status(msg % (len(allbaserevs), len(newrevs)))
3432 if targetfilterrevs:
3434 if targetfilterrevs:
3433 msg = b'(%d revisions still filtered)\n'
3435 msg = b'(%d revisions still filtered)\n'
3434 ui.status(msg % len(targetfilterrevs))
3436 ui.status(msg % len(targetfilterrevs))
3435
3437
3436 try:
3438 try:
3437 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3439 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3438 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3440 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3439
3441
3440 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3442 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3441 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3443 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3442
3444
3443 # try to find an existing branchmap to reuse
3445 # try to find an existing branchmap to reuse
3444 subsettable = getbranchmapsubsettable()
3446 subsettable = getbranchmapsubsettable()
3445 candidatefilter = subsettable.get(None)
3447 candidatefilter = subsettable.get(None)
3446 while candidatefilter is not None:
3448 while candidatefilter is not None:
3447 candidatebm = repo.filtered(candidatefilter).branchmap()
3449 candidatebm = repo.filtered(candidatefilter).branchmap()
3448 if candidatebm.validfor(baserepo):
3450 if candidatebm.validfor(baserepo):
3449 filtered = repoview.filterrevs(repo, candidatefilter)
3451 filtered = repoview.filterrevs(repo, candidatefilter)
3450 missing = [r for r in allbaserevs if r in filtered]
3452 missing = [r for r in allbaserevs if r in filtered]
3451 base = candidatebm.copy()
3453 base = candidatebm.copy()
3452 base.update(baserepo, missing)
3454 base.update(baserepo, missing)
3453 break
3455 break
3454 candidatefilter = subsettable.get(candidatefilter)
3456 candidatefilter = subsettable.get(candidatefilter)
3455 else:
3457 else:
3456 # no suitable subset where found
3458 # no suitable subset where found
3457 base = branchmap.branchcache()
3459 base = branchmap.branchcache()
3458 base.update(baserepo, allbaserevs)
3460 base.update(baserepo, allbaserevs)
3459
3461
3460 def setup():
3462 def setup():
3461 x[0] = base.copy()
3463 x[0] = base.copy()
3462 if clearcaches:
3464 if clearcaches:
3463 unfi._revbranchcache = None
3465 unfi._revbranchcache = None
3464 clearchangelog(repo)
3466 clearchangelog(repo)
3465
3467
3466 def bench():
3468 def bench():
3467 x[0].update(targetrepo, newrevs)
3469 x[0].update(targetrepo, newrevs)
3468
3470
3469 timer(bench, setup=setup)
3471 timer(bench, setup=setup)
3470 fm.end()
3472 fm.end()
3471 finally:
3473 finally:
3472 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3474 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3473 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3475 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3474
3476
3475
3477
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix: help text previously said "brachmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only enumerate the on-disk branchmap cache files and their
    sizes. Otherwise, locate a cached branchmap usable for the requested
    filter level (walking up the subset table if needed) and time reading it
    from disk.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just report which branchmap caches exist on disk, with their size
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3534
3536
3535
3537
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # constructing the obsstore parses every on-disk marker
        return len(obsolete.obsstore(svfs))

    timer(loadmarkers)
    fm.end()
3545
3547
3546
3548
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark `util.lrucachedict` under several access patterns"""
    opts = _byteskwargs(opts)

    def doinit():
        # measure raw construction cost
        for _i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # keys used to pre-fill the cache
    values = [random.randint(0, _maxint) for _i in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = [random.choice(values) for _i in _xrange(gets)]

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # entry may have been evicted by the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for _i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for _i in _xrange(mixed):
        # op 0 is a lookup, op 1 is an insertion
        op = 0 if random.randint(0, 100) < mixedgetfreq else 1
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [(doinit, b'init')]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3701
3703
3702
3704
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def dowrite():
        # bind once so the loop measures write cost, not attribute lookup
        write = ui.writenoi18n
        for _i in range(100000):
            write(b'Testing write performance\n')

    timer(dowrite)
    fm.end()
3717
3719
3718
3720
def uisetup(ui):
    """extension setup hook; patch very old Mercurial versions if needed"""
    if not util.safehasattr(cmdutil, b'openrevlog') or util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # nothing to patch on this version
        return

    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def wrappedopenrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', wrappedopenrevlog)
3737
3739
3738
3740
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive one full progress bar from 0 to `total`
        with ui.makeprogress(topic, total=total) as progress:
            for _i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now