##// END OF EJS Templates
perf: make perf::bundle compatible before 61ba04693d65...
marmoute -
r50368:a7a5740b default
parent child Browse files
Show More
@@ -1,4190 +1,4195 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged (fallback when pycompat helpers are absent)."""
    return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
def safehasattr(thing, attr):
    """Return True if `thing` has attribute `attr` (given as bytes).

    Uses a private sentinel instead of hasattr() so that a raising
    property does not hide the attribute's existence."""
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238 # for "historical portability":
238 # for "historical portability":
239 # define parsealiases locally, because cmdutil.parsealiases has been
239 # define parsealiases locally, because cmdutil.parsealiases has been
240 # available since 1.5 (or 6252852b4332)
240 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias|...' command spec into its list of names.

    Defined locally for "historical portability": cmdutil.parsealiases
    has only been available since 1.5 (or 6252852b4332)."""
    return list(cmd.split(b"|"))
243
243
244
244
245 if safehasattr(registrar, 'command'):
245 if safehasattr(registrar, 'command'):
246 command = registrar.command(cmdtable)
246 command = registrar.command(cmdtable)
247 elif safehasattr(cmdutil, 'command'):
247 elif safehasattr(cmdutil, 'command'):
248 command = cmdutil.command(cmdtable)
248 command = cmdutil.command(cmdtable)
249 if 'norepo' not in getargspec(command).args:
249 if 'norepo' not in getargspec(command).args:
250 # for "historical portability":
250 # for "historical portability":
251 # wrap original cmdutil.command, because "norepo" option has
251 # wrap original cmdutil.command, because "norepo" option has
252 # been available since 3.1 (or 75a96326cecb)
252 # been available since 3.1 (or 75a96326cecb)
253 _command = command
253 _command = command
254
254
255 def command(name, options=(), synopsis=None, norepo=False):
255 def command(name, options=(), synopsis=None, norepo=False):
256 if norepo:
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return _command(name, list(options), synopsis)
258 return _command(name, list(options), synopsis)
259
259
260
260
261 else:
261 else:
262 # for "historical portability":
262 # for "historical portability":
263 # define "@command" annotation locally, because cmdutil.command
263 # define "@command" annotation locally, because cmdutil.command
264 # has been available since 1.9 (or 2daa5179e73f)
264 # has been available since 1.9 (or 2daa5179e73f)
265 def command(name, options=(), synopsis=None, norepo=False):
265 def command(name, options=(), synopsis=None, norepo=False):
266 def decorator(func):
266 def decorator(func):
267 if synopsis:
267 if synopsis:
268 cmdtable[name] = func, list(options), synopsis
268 cmdtable[name] = func, list(options), synopsis
269 else:
269 else:
270 cmdtable[name] = func, list(options)
270 cmdtable[name] = func, list(options)
271 if norepo:
271 if norepo:
272 commands.norepo += b' %s' % b' '.join(parsealiases(name))
272 commands.norepo += b' %s' % b' '.join(parsealiases(name))
273 return func
273 return func
274
274
275 return decorator
275 return decorator
276
276
277
277
278 try:
278 try:
279 import mercurial.registrar
279 import mercurial.registrar
280 import mercurial.configitems
280 import mercurial.configitems
281
281
282 configtable = {}
282 configtable = {}
283 configitem = mercurial.registrar.configitem(configtable)
283 configitem = mercurial.registrar.configitem(configtable)
284 configitem(
284 configitem(
285 b'perf',
285 b'perf',
286 b'presleep',
286 b'presleep',
287 default=mercurial.configitems.dynamicdefault,
287 default=mercurial.configitems.dynamicdefault,
288 experimental=True,
288 experimental=True,
289 )
289 )
290 configitem(
290 configitem(
291 b'perf',
291 b'perf',
292 b'stub',
292 b'stub',
293 default=mercurial.configitems.dynamicdefault,
293 default=mercurial.configitems.dynamicdefault,
294 experimental=True,
294 experimental=True,
295 )
295 )
296 configitem(
296 configitem(
297 b'perf',
297 b'perf',
298 b'parentscount',
298 b'parentscount',
299 default=mercurial.configitems.dynamicdefault,
299 default=mercurial.configitems.dynamicdefault,
300 experimental=True,
300 experimental=True,
301 )
301 )
302 configitem(
302 configitem(
303 b'perf',
303 b'perf',
304 b'all-timing',
304 b'all-timing',
305 default=mercurial.configitems.dynamicdefault,
305 default=mercurial.configitems.dynamicdefault,
306 experimental=True,
306 experimental=True,
307 )
307 )
308 configitem(
308 configitem(
309 b'perf',
309 b'perf',
310 b'pre-run',
310 b'pre-run',
311 default=mercurial.configitems.dynamicdefault,
311 default=mercurial.configitems.dynamicdefault,
312 )
312 )
313 configitem(
313 configitem(
314 b'perf',
314 b'perf',
315 b'profile-benchmark',
315 b'profile-benchmark',
316 default=mercurial.configitems.dynamicdefault,
316 default=mercurial.configitems.dynamicdefault,
317 )
317 )
318 configitem(
318 configitem(
319 b'perf',
319 b'perf',
320 b'run-limits',
320 b'run-limits',
321 default=mercurial.configitems.dynamicdefault,
321 default=mercurial.configitems.dynamicdefault,
322 experimental=True,
322 experimental=True,
323 )
323 )
324 except (ImportError, AttributeError):
324 except (ImportError, AttributeError):
325 pass
325 pass
326 except TypeError:
326 except TypeError:
327 # compatibility fix for a11fd395e83f
327 # compatibility fix for a11fd395e83f
328 # hg version: 5.2
328 # hg version: 5.2
329 configitem(
329 configitem(
330 b'perf',
330 b'perf',
331 b'presleep',
331 b'presleep',
332 default=mercurial.configitems.dynamicdefault,
332 default=mercurial.configitems.dynamicdefault,
333 )
333 )
334 configitem(
334 configitem(
335 b'perf',
335 b'perf',
336 b'stub',
336 b'stub',
337 default=mercurial.configitems.dynamicdefault,
337 default=mercurial.configitems.dynamicdefault,
338 )
338 )
339 configitem(
339 configitem(
340 b'perf',
340 b'perf',
341 b'parentscount',
341 b'parentscount',
342 default=mercurial.configitems.dynamicdefault,
342 default=mercurial.configitems.dynamicdefault,
343 )
343 )
344 configitem(
344 configitem(
345 b'perf',
345 b'perf',
346 b'all-timing',
346 b'all-timing',
347 default=mercurial.configitems.dynamicdefault,
347 default=mercurial.configitems.dynamicdefault,
348 )
348 )
349 configitem(
349 configitem(
350 b'perf',
350 b'perf',
351 b'pre-run',
351 b'pre-run',
352 default=mercurial.configitems.dynamicdefault,
352 default=mercurial.configitems.dynamicdefault,
353 )
353 )
354 configitem(
354 configitem(
355 b'perf',
355 b'perf',
356 b'profile-benchmark',
356 b'profile-benchmark',
357 default=mercurial.configitems.dynamicdefault,
357 default=mercurial.configitems.dynamicdefault,
358 )
358 )
359 configitem(
359 configitem(
360 b'perf',
360 b'perf',
361 b'run-limits',
361 b'run-limits',
362 default=mercurial.configitems.dynamicdefault,
362 default=mercurial.configitems.dynamicdefault,
363 )
363 )
364
364
365
365
def getlen(ui):
    """Return a length function for benchmark sizing.

    When the experimental `perf.stub` config is set, every collection is
    reported as length 1 so benchmarks finish quickly during testing."""
    stub = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stub else len
370
370
371
371
class noop:
    """Context manager that does nothing on entry or exit."""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


# shared do-nothing context, used where a profiler may be substituted
NOOPCTX = noop()
383
383
384
384
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<min-run-count>'; malformed entries are
    # warned about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # only available when the profiling module imported successfully
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
507
507
508
508
def stub_timer(fm, func, setup=None, title=None):
    """Run `setup` (when given) followed by `func` exactly once.

    Stub replacement for _timer used when perf.stub is set: nothing is
    timed or reported, and `fm`/`title` are ignored."""
    if setup is not None:
        setup()
    func()
513
513
514
514
@contextlib.contextmanager
def timeone():
    """Time the enclosed block.

    Yields a list that receives a single (wallclock, user-cpu, sys-cpu)
    triple once the `with` body has completed."""
    result = []
    os_before = os.times()
    clock_before = util.timer()
    yield result
    clock_after = util.timer()
    os_after = os.times()
    result.append(
        (
            clock_after - clock_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
525
525
526
526
# list of stop condition (elapsed time, minimal run count)
# i.e. stop once 3s have elapsed and 100 runs are done, or once 10s have
# elapsed and 3 runs are done -- whichever comes first
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
532
532
533
533
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Run `func` repeatedly, timing each run, until a limit is reached.

    `setup`, when given, runs before every iteration and is not timed.
    `limits` is a sequence of (elapsed-seconds, min-run-count) pairs; the
    loop stops at the first pair whose thresholds are both met.  `prerun`
    extra warm-up iterations (untimed, uncounted) run first.  `profiler`,
    when given, wraps only the first timed iteration.  Results are
    reported through formatter `fm` by formatone()."""
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs: neither timed nor counted against the limits
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first iteration is profiled; disarm afterwards
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
573
573
574
574
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Report one benchmark's timings through formatter `fm`.

    `timings` is a list of (wall, user, sys) triples and is sorted in
    place.  Only the best run is shown unless `displayall` is set, in
    which case max, average and median lines are emitted as well."""
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # the "best" line historically carries unprefixed field names
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        display(b'median', timings[len(timings) // 2])
608
608
609
609
610 # utilities for historical portability
610 # utilities for historical portability
611
611
612
612
def getint(ui, section, name, default):
    """Read config option `section.name` as an integer.

    Returns `default` when the option is unset; raises ConfigError when
    the stored value is not an integer.  Kept local for "historical
    portability": ui.configint has been available since 1.9 (or
    fa2b596db182).
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
625
625
626
626
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        )
        raise error.Abort(msg % (name, obj))

    original = getattr(obj, _sysstr(name))

    class attrutil:
        # Tiny handle bundling a setter and a restorer for the attribute.
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), original)

    return attrutil()
663
663
664
664
665 # utilities to examine each internal API changes
665 # utilities to examine each internal API changes
666
666
667
667
def getbranchmapsubsettable():
    """Return the `subsettable` mapping from whichever module hosts it.

    For "historical portability", subsettable has moved over time:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686
686
687
687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    For "historical portability": repo.svfs has been available since
    2.3 (or 7034365089bf); older repos expose `sopener` instead.
    """
    store_vfs = getattr(repo, 'svfs', None)
    if store_vfs:
        return store_vfs
    return getattr(repo, 'sopener')
697
697
698
698
def getvfs(repo):
    """Return appropriate object to access files under .hg

    For "historical portability": repo.vfs has been available since
    2.3 (or 7034365089bf); older repos expose `opener` instead.
    """
    repo_vfs = getattr(repo, 'vfs', None)
    if repo_vfs:
        return repo_vfs
    return getattr(repo, 'opener')
708
708
709
709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    tags_setter = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if tags_setter:  # since 1.4 (or 5614a628d173)
        return lambda: tags_setter.set(None)

    tagscache_setter = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if tagscache_setter:  # since 0.6 (or d7df759d0e97)
        return lambda: tagscache_setter.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738
738
739
739
740 # utilities to clear cache
740 # utilities to clear cache
741
741
742
742
def clearfilecache(obj, attrname):
    """Invalidate the filecache entry `attrname` on `obj`.

    Works on the unfiltered view when `obj` exposes one, dropping both
    the materialized attribute and its `_filecache` record so the next
    access recomputes it.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750
750
751
751
def clearchangelog(repo):
    """Force `repo`'s changelog to be recomputed on next access."""
    if repo is not repo.unfiltered():
        # A filtered view keeps its own (cache, key) pair; reset both
        # so the filtered changelog is rebuilt as well.
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
757
757
758
758
759 # perf commands
759 # perf commands
760
760
761
761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # Benchmark walking the dirstate with a matcher built from PATS.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walk():
        entries = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(entries))

    timer(walk)
    fm.end()
775
775
776
776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # Benchmark annotating file `f` at the working directory parent.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def annotate():
        return len(fctx.annotate(True))

    timer(annotate)
    fm.end()
784
784
785
785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']
    if opts[b'dirstate']:
        # Exercise the low-level dirstate.status call directly.
        dirstate = repo.dirstate
        matcher = scmutil.matchall(repo)

        def status_dirstate():
            st = dirstate.status(
                matcher,
                subrepos=[],
                ignored=False,
                clean=False,
                unknown=unknown,
            )
            sum(map(bool, st))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=unknown))))
    fm.end()
822
822
823
823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # Benchmark a dry-run addremove over the whole working directory.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Read the old value *before* entering the try block: if the read
    # itself failed inside the try, the finally clause would hit a
    # NameError on `oldquiet` and mask the original error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # modern API (uipathfn argument added)
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            # historical portability: older addremove signature
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841
841
842
842
def clearcaches(cl):
    """Drop revlog-level caches on `cl`.

    Behaves somewhat consistently across internal API changes: prefers
    the modern clearcaches() method, falling back to resetting the
    private node cache on hg <= 5.2.
    """
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853
853
854
854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    # Caches are wiped in setup so each run recomputes heads from scratch.
    def setup():
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
870
870
871
871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    # Benchmark computing the repository's tags; --clear-revlogs also
    # refreshes the changelog and manifest between runs.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
896
896
897
897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # Benchmark iterating over all ancestors of the changelog heads.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
910
910
911
911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # Benchmark membership tests of REVSET's revisions against the
    # lazy ancestor set of all heads.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(run)
    fm.end()
926
926
927
927
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # With one positional argument it is the revision; with two, the
    # first selects which file revlog to open.
    if arg_2 is None:
        file_, rev = None, arg_1
    else:
        file_, rev = arg_1, arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # Rebuild the revisioninfo the delta search would receive for `rev`.
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
991
991
992
992
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path

        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        # historical portability: urlutil only exists in recent hg
        path = ui.expandpath(path)

    def setup():
        # A fresh peer per run, so connection state is not reused.
        repos[1] = hg.peer(ui, opts, path)

    def run():
        setdiscovery.findcommonheads(ui, *repos)

    timer(run, setup=setup)
    fm.end()
1014
1014
1015
1015
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def run():
        # Property access triggers the parse we want to measure.
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1040
1040
1041
1041
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    from mercurial import bundlecaches
    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed error-message typo ("not revision" -> "no revision")
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # Older parsebundlespec results do not carry an explicit
    # "cg.version" param; derive it from the bundle version instead so
    # this command keeps working before 61ba04693d65.
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,  # benchmark only; discard the produced bundle
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1132
1137
1133
1138
1134 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1139 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1135 def perfbundleread(ui, repo, bundlepath, **opts):
1140 def perfbundleread(ui, repo, bundlepath, **opts):
1136 """Benchmark reading of bundle files.
1141 """Benchmark reading of bundle files.
1137
1142
1138 This command is meant to isolate the I/O part of bundle reading as
1143 This command is meant to isolate the I/O part of bundle reading as
1139 much as possible.
1144 much as possible.
1140 """
1145 """
1141 from mercurial import (
1146 from mercurial import (
1142 bundle2,
1147 bundle2,
1143 exchange,
1148 exchange,
1144 streamclone,
1149 streamclone,
1145 )
1150 )
1146
1151
1147 opts = _byteskwargs(opts)
1152 opts = _byteskwargs(opts)
1148
1153
1149 def makebench(fn):
1154 def makebench(fn):
1150 def run():
1155 def run():
1151 with open(bundlepath, b'rb') as fh:
1156 with open(bundlepath, b'rb') as fh:
1152 bundle = exchange.readbundle(ui, fh, bundlepath)
1157 bundle = exchange.readbundle(ui, fh, bundlepath)
1153 fn(bundle)
1158 fn(bundle)
1154
1159
1155 return run
1160 return run
1156
1161
1157 def makereadnbytes(size):
1162 def makereadnbytes(size):
1158 def run():
1163 def run():
1159 with open(bundlepath, b'rb') as fh:
1164 with open(bundlepath, b'rb') as fh:
1160 bundle = exchange.readbundle(ui, fh, bundlepath)
1165 bundle = exchange.readbundle(ui, fh, bundlepath)
1161 while bundle.read(size):
1166 while bundle.read(size):
1162 pass
1167 pass
1163
1168
1164 return run
1169 return run
1165
1170
1166 def makestdioread(size):
1171 def makestdioread(size):
1167 def run():
1172 def run():
1168 with open(bundlepath, b'rb') as fh:
1173 with open(bundlepath, b'rb') as fh:
1169 while fh.read(size):
1174 while fh.read(size):
1170 pass
1175 pass
1171
1176
1172 return run
1177 return run
1173
1178
1174 # bundle1
1179 # bundle1
1175
1180
1176 def deltaiter(bundle):
1181 def deltaiter(bundle):
1177 for delta in bundle.deltaiter():
1182 for delta in bundle.deltaiter():
1178 pass
1183 pass
1179
1184
1180 def iterchunks(bundle):
1185 def iterchunks(bundle):
1181 for chunk in bundle.getchunks():
1186 for chunk in bundle.getchunks():
1182 pass
1187 pass
1183
1188
1184 # bundle2
1189 # bundle2
1185
1190
1186 def forwardchunks(bundle):
1191 def forwardchunks(bundle):
1187 for chunk in bundle._forwardchunks():
1192 for chunk in bundle._forwardchunks():
1188 pass
1193 pass
1189
1194
1190 def iterparts(bundle):
1195 def iterparts(bundle):
1191 for part in bundle.iterparts():
1196 for part in bundle.iterparts():
1192 pass
1197 pass
1193
1198
1194 def iterpartsseekable(bundle):
1199 def iterpartsseekable(bundle):
1195 for part in bundle.iterparts(seekable=True):
1200 for part in bundle.iterparts(seekable=True):
1196 pass
1201 pass
1197
1202
1198 def seek(bundle):
1203 def seek(bundle):
1199 for part in bundle.iterparts(seekable=True):
1204 for part in bundle.iterparts(seekable=True):
1200 part.seek(0, os.SEEK_END)
1205 part.seek(0, os.SEEK_END)
1201
1206
1202 def makepartreadnbytes(size):
1207 def makepartreadnbytes(size):
1203 def run():
1208 def run():
1204 with open(bundlepath, b'rb') as fh:
1209 with open(bundlepath, b'rb') as fh:
1205 bundle = exchange.readbundle(ui, fh, bundlepath)
1210 bundle = exchange.readbundle(ui, fh, bundlepath)
1206 for part in bundle.iterparts():
1211 for part in bundle.iterparts():
1207 while part.read(size):
1212 while part.read(size):
1208 pass
1213 pass
1209
1214
1210 return run
1215 return run
1211
1216
1212 benches = [
1217 benches = [
1213 (makestdioread(8192), b'read(8k)'),
1218 (makestdioread(8192), b'read(8k)'),
1214 (makestdioread(16384), b'read(16k)'),
1219 (makestdioread(16384), b'read(16k)'),
1215 (makestdioread(32768), b'read(32k)'),
1220 (makestdioread(32768), b'read(32k)'),
1216 (makestdioread(131072), b'read(128k)'),
1221 (makestdioread(131072), b'read(128k)'),
1217 ]
1222 ]
1218
1223
1219 with open(bundlepath, b'rb') as fh:
1224 with open(bundlepath, b'rb') as fh:
1220 bundle = exchange.readbundle(ui, fh, bundlepath)
1225 bundle = exchange.readbundle(ui, fh, bundlepath)
1221
1226
1222 if isinstance(bundle, changegroup.cg1unpacker):
1227 if isinstance(bundle, changegroup.cg1unpacker):
1223 benches.extend(
1228 benches.extend(
1224 [
1229 [
1225 (makebench(deltaiter), b'cg1 deltaiter()'),
1230 (makebench(deltaiter), b'cg1 deltaiter()'),
1226 (makebench(iterchunks), b'cg1 getchunks()'),
1231 (makebench(iterchunks), b'cg1 getchunks()'),
1227 (makereadnbytes(8192), b'cg1 read(8k)'),
1232 (makereadnbytes(8192), b'cg1 read(8k)'),
1228 (makereadnbytes(16384), b'cg1 read(16k)'),
1233 (makereadnbytes(16384), b'cg1 read(16k)'),
1229 (makereadnbytes(32768), b'cg1 read(32k)'),
1234 (makereadnbytes(32768), b'cg1 read(32k)'),
1230 (makereadnbytes(131072), b'cg1 read(128k)'),
1235 (makereadnbytes(131072), b'cg1 read(128k)'),
1231 ]
1236 ]
1232 )
1237 )
1233 elif isinstance(bundle, bundle2.unbundle20):
1238 elif isinstance(bundle, bundle2.unbundle20):
1234 benches.extend(
1239 benches.extend(
1235 [
1240 [
1236 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1241 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1237 (makebench(iterparts), b'bundle2 iterparts()'),
1242 (makebench(iterparts), b'bundle2 iterparts()'),
1238 (
1243 (
1239 makebench(iterpartsseekable),
1244 makebench(iterpartsseekable),
1240 b'bundle2 iterparts() seekable',
1245 b'bundle2 iterparts() seekable',
1241 ),
1246 ),
1242 (makebench(seek), b'bundle2 part seek()'),
1247 (makebench(seek), b'bundle2 part seek()'),
1243 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1248 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1244 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1249 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1245 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1250 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1246 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1251 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1247 ]
1252 ]
1248 )
1253 )
1249 elif isinstance(bundle, streamclone.streamcloneapplier):
1254 elif isinstance(bundle, streamclone.streamcloneapplier):
1250 raise error.Abort(b'stream clone bundles not supported')
1255 raise error.Abort(b'stream clone bundles not supported')
1251 else:
1256 else:
1252 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1257 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1253
1258
1254 for fn, title in benches:
1259 for fn, title in benches:
1255 timer, fm = gettimer(ui, opts)
1260 timer, fm = gettimer(ui, opts)
1256 timer(fn, title=title)
1261 timer(fn, title=title)
1257 fm.end()
1262 fm.end()
1258
1263
1259
1264
1260 @command(
1265 @command(
1261 b'perf::changegroupchangelog|perfchangegroupchangelog',
1266 b'perf::changegroupchangelog|perfchangegroupchangelog',
1262 formatteropts
1267 formatteropts
1263 + [
1268 + [
1264 (b'', b'cgversion', b'02', b'changegroup version'),
1269 (b'', b'cgversion', b'02', b'changegroup version'),
1265 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1270 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1266 ],
1271 ],
1267 )
1272 )
1268 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1273 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1269 """Benchmark producing a changelog group for a changegroup.
1274 """Benchmark producing a changelog group for a changegroup.
1270
1275
1271 This measures the time spent processing the changelog during a
1276 This measures the time spent processing the changelog during a
1272 bundle operation. This occurs during `hg bundle` and on a server
1277 bundle operation. This occurs during `hg bundle` and on a server
1273 processing a `getbundle` wire protocol request (handles clones
1278 processing a `getbundle` wire protocol request (handles clones
1274 and pull requests).
1279 and pull requests).
1275
1280
1276 By default, all revisions are added to the changegroup.
1281 By default, all revisions are added to the changegroup.
1277 """
1282 """
1278 opts = _byteskwargs(opts)
1283 opts = _byteskwargs(opts)
1279 cl = repo.changelog
1284 cl = repo.changelog
1280 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1285 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1281 bundler = changegroup.getbundler(cgversion, repo)
1286 bundler = changegroup.getbundler(cgversion, repo)
1282
1287
1283 def d():
1288 def d():
1284 state, chunks = bundler._generatechangelog(cl, nodes)
1289 state, chunks = bundler._generatechangelog(cl, nodes)
1285 for chunk in chunks:
1290 for chunk in chunks:
1286 pass
1291 pass
1287
1292
1288 timer, fm = gettimer(ui, opts)
1293 timer, fm = gettimer(ui, opts)
1289
1294
1290 # Terminal printing can interfere with timing. So disable it.
1295 # Terminal printing can interfere with timing. So disable it.
1291 with ui.configoverride({(b'progress', b'disable'): True}):
1296 with ui.configoverride({(b'progress', b'disable'): True}):
1292 timer(d)
1297 timer(d)
1293
1298
1294 fm.end()
1299 fm.end()
1295
1300
1296
1301
1297 @command(b'perf::dirs|perfdirs', formatteropts)
1302 @command(b'perf::dirs|perfdirs', formatteropts)
1298 def perfdirs(ui, repo, **opts):
1303 def perfdirs(ui, repo, **opts):
1299 opts = _byteskwargs(opts)
1304 opts = _byteskwargs(opts)
1300 timer, fm = gettimer(ui, opts)
1305 timer, fm = gettimer(ui, opts)
1301 dirstate = repo.dirstate
1306 dirstate = repo.dirstate
1302 b'a' in dirstate
1307 b'a' in dirstate
1303
1308
1304 def d():
1309 def d():
1305 dirstate.hasdir(b'a')
1310 dirstate.hasdir(b'a')
1306 try:
1311 try:
1307 del dirstate._map._dirs
1312 del dirstate._map._dirs
1308 except AttributeError:
1313 except AttributeError:
1309 pass
1314 pass
1310
1315
1311 timer(d)
1316 timer(d)
1312 fm.end()
1317 fm.end()
1313
1318
1314
1319
1315 @command(
1320 @command(
1316 b'perf::dirstate|perfdirstate',
1321 b'perf::dirstate|perfdirstate',
1317 [
1322 [
1318 (
1323 (
1319 b'',
1324 b'',
1320 b'iteration',
1325 b'iteration',
1321 None,
1326 None,
1322 b'benchmark a full iteration for the dirstate',
1327 b'benchmark a full iteration for the dirstate',
1323 ),
1328 ),
1324 (
1329 (
1325 b'',
1330 b'',
1326 b'contains',
1331 b'contains',
1327 None,
1332 None,
1328 b'benchmark a large amount of `nf in dirstate` calls',
1333 b'benchmark a large amount of `nf in dirstate` calls',
1329 ),
1334 ),
1330 ]
1335 ]
1331 + formatteropts,
1336 + formatteropts,
1332 )
1337 )
1333 def perfdirstate(ui, repo, **opts):
1338 def perfdirstate(ui, repo, **opts):
1334 """benchmap the time of various distate operations
1339 """benchmap the time of various distate operations
1335
1340
1336 By default benchmark the time necessary to load a dirstate from scratch.
1341 By default benchmark the time necessary to load a dirstate from scratch.
1337 The dirstate is loaded to the point were a "contains" request can be
1342 The dirstate is loaded to the point were a "contains" request can be
1338 answered.
1343 answered.
1339 """
1344 """
1340 opts = _byteskwargs(opts)
1345 opts = _byteskwargs(opts)
1341 timer, fm = gettimer(ui, opts)
1346 timer, fm = gettimer(ui, opts)
1342 b"a" in repo.dirstate
1347 b"a" in repo.dirstate
1343
1348
1344 if opts[b'iteration'] and opts[b'contains']:
1349 if opts[b'iteration'] and opts[b'contains']:
1345 msg = b'only specify one of --iteration or --contains'
1350 msg = b'only specify one of --iteration or --contains'
1346 raise error.Abort(msg)
1351 raise error.Abort(msg)
1347
1352
1348 if opts[b'iteration']:
1353 if opts[b'iteration']:
1349 setup = None
1354 setup = None
1350 dirstate = repo.dirstate
1355 dirstate = repo.dirstate
1351
1356
1352 def d():
1357 def d():
1353 for f in dirstate:
1358 for f in dirstate:
1354 pass
1359 pass
1355
1360
1356 elif opts[b'contains']:
1361 elif opts[b'contains']:
1357 setup = None
1362 setup = None
1358 dirstate = repo.dirstate
1363 dirstate = repo.dirstate
1359 allfiles = list(dirstate)
1364 allfiles = list(dirstate)
1360 # also add file path that will be "missing" from the dirstate
1365 # also add file path that will be "missing" from the dirstate
1361 allfiles.extend([f[::-1] for f in allfiles])
1366 allfiles.extend([f[::-1] for f in allfiles])
1362
1367
1363 def d():
1368 def d():
1364 for f in allfiles:
1369 for f in allfiles:
1365 f in dirstate
1370 f in dirstate
1366
1371
1367 else:
1372 else:
1368
1373
1369 def setup():
1374 def setup():
1370 repo.dirstate.invalidate()
1375 repo.dirstate.invalidate()
1371
1376
1372 def d():
1377 def d():
1373 b"a" in repo.dirstate
1378 b"a" in repo.dirstate
1374
1379
1375 timer(d, setup=setup)
1380 timer(d, setup=setup)
1376 fm.end()
1381 fm.end()
1377
1382
1378
1383
1379 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1384 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1380 def perfdirstatedirs(ui, repo, **opts):
1385 def perfdirstatedirs(ui, repo, **opts):
1381 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1386 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1382 opts = _byteskwargs(opts)
1387 opts = _byteskwargs(opts)
1383 timer, fm = gettimer(ui, opts)
1388 timer, fm = gettimer(ui, opts)
1384 repo.dirstate.hasdir(b"a")
1389 repo.dirstate.hasdir(b"a")
1385
1390
1386 def setup():
1391 def setup():
1387 try:
1392 try:
1388 del repo.dirstate._map._dirs
1393 del repo.dirstate._map._dirs
1389 except AttributeError:
1394 except AttributeError:
1390 pass
1395 pass
1391
1396
1392 def d():
1397 def d():
1393 repo.dirstate.hasdir(b"a")
1398 repo.dirstate.hasdir(b"a")
1394
1399
1395 timer(d, setup=setup)
1400 timer(d, setup=setup)
1396 fm.end()
1401 fm.end()
1397
1402
1398
1403
1399 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1404 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1400 def perfdirstatefoldmap(ui, repo, **opts):
1405 def perfdirstatefoldmap(ui, repo, **opts):
1401 """benchmap a `dirstate._map.filefoldmap.get()` request
1406 """benchmap a `dirstate._map.filefoldmap.get()` request
1402
1407
1403 The dirstate filefoldmap cache is dropped between every request.
1408 The dirstate filefoldmap cache is dropped between every request.
1404 """
1409 """
1405 opts = _byteskwargs(opts)
1410 opts = _byteskwargs(opts)
1406 timer, fm = gettimer(ui, opts)
1411 timer, fm = gettimer(ui, opts)
1407 dirstate = repo.dirstate
1412 dirstate = repo.dirstate
1408 dirstate._map.filefoldmap.get(b'a')
1413 dirstate._map.filefoldmap.get(b'a')
1409
1414
1410 def setup():
1415 def setup():
1411 del dirstate._map.filefoldmap
1416 del dirstate._map.filefoldmap
1412
1417
1413 def d():
1418 def d():
1414 dirstate._map.filefoldmap.get(b'a')
1419 dirstate._map.filefoldmap.get(b'a')
1415
1420
1416 timer(d, setup=setup)
1421 timer(d, setup=setup)
1417 fm.end()
1422 fm.end()
1418
1423
1419
1424
1420 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1425 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1421 def perfdirfoldmap(ui, repo, **opts):
1426 def perfdirfoldmap(ui, repo, **opts):
1422 """benchmap a `dirstate._map.dirfoldmap.get()` request
1427 """benchmap a `dirstate._map.dirfoldmap.get()` request
1423
1428
1424 The dirstate dirfoldmap cache is dropped between every request.
1429 The dirstate dirfoldmap cache is dropped between every request.
1425 """
1430 """
1426 opts = _byteskwargs(opts)
1431 opts = _byteskwargs(opts)
1427 timer, fm = gettimer(ui, opts)
1432 timer, fm = gettimer(ui, opts)
1428 dirstate = repo.dirstate
1433 dirstate = repo.dirstate
1429 dirstate._map.dirfoldmap.get(b'a')
1434 dirstate._map.dirfoldmap.get(b'a')
1430
1435
1431 def setup():
1436 def setup():
1432 del dirstate._map.dirfoldmap
1437 del dirstate._map.dirfoldmap
1433 try:
1438 try:
1434 del dirstate._map._dirs
1439 del dirstate._map._dirs
1435 except AttributeError:
1440 except AttributeError:
1436 pass
1441 pass
1437
1442
1438 def d():
1443 def d():
1439 dirstate._map.dirfoldmap.get(b'a')
1444 dirstate._map.dirfoldmap.get(b'a')
1440
1445
1441 timer(d, setup=setup)
1446 timer(d, setup=setup)
1442 fm.end()
1447 fm.end()
1443
1448
1444
1449
1445 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1450 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1446 def perfdirstatewrite(ui, repo, **opts):
1451 def perfdirstatewrite(ui, repo, **opts):
1447 """benchmap the time it take to write a dirstate on disk"""
1452 """benchmap the time it take to write a dirstate on disk"""
1448 opts = _byteskwargs(opts)
1453 opts = _byteskwargs(opts)
1449 timer, fm = gettimer(ui, opts)
1454 timer, fm = gettimer(ui, opts)
1450 ds = repo.dirstate
1455 ds = repo.dirstate
1451 b"a" in ds
1456 b"a" in ds
1452
1457
1453 def setup():
1458 def setup():
1454 ds._dirty = True
1459 ds._dirty = True
1455
1460
1456 def d():
1461 def d():
1457 ds.write(repo.currenttransaction())
1462 ds.write(repo.currenttransaction())
1458
1463
1459 timer(d, setup=setup)
1464 timer(d, setup=setup)
1460 fm.end()
1465 fm.end()
1461
1466
1462
1467
1463 def _getmergerevs(repo, opts):
1468 def _getmergerevs(repo, opts):
1464 """parse command argument to return rev involved in merge
1469 """parse command argument to return rev involved in merge
1465
1470
1466 input: options dictionnary with `rev`, `from` and `bse`
1471 input: options dictionnary with `rev`, `from` and `bse`
1467 output: (localctx, otherctx, basectx)
1472 output: (localctx, otherctx, basectx)
1468 """
1473 """
1469 if opts[b'from']:
1474 if opts[b'from']:
1470 fromrev = scmutil.revsingle(repo, opts[b'from'])
1475 fromrev = scmutil.revsingle(repo, opts[b'from'])
1471 wctx = repo[fromrev]
1476 wctx = repo[fromrev]
1472 else:
1477 else:
1473 wctx = repo[None]
1478 wctx = repo[None]
1474 # we don't want working dir files to be stat'd in the benchmark, so
1479 # we don't want working dir files to be stat'd in the benchmark, so
1475 # prime that cache
1480 # prime that cache
1476 wctx.dirty()
1481 wctx.dirty()
1477 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1482 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1478 if opts[b'base']:
1483 if opts[b'base']:
1479 fromrev = scmutil.revsingle(repo, opts[b'base'])
1484 fromrev = scmutil.revsingle(repo, opts[b'base'])
1480 ancestor = repo[fromrev]
1485 ancestor = repo[fromrev]
1481 else:
1486 else:
1482 ancestor = wctx.ancestor(rctx)
1487 ancestor = wctx.ancestor(rctx)
1483 return (wctx, rctx, ancestor)
1488 return (wctx, rctx, ancestor)
1484
1489
1485
1490
1486 @command(
1491 @command(
1487 b'perf::mergecalculate|perfmergecalculate',
1492 b'perf::mergecalculate|perfmergecalculate',
1488 [
1493 [
1489 (b'r', b'rev', b'.', b'rev to merge against'),
1494 (b'r', b'rev', b'.', b'rev to merge against'),
1490 (b'', b'from', b'', b'rev to merge from'),
1495 (b'', b'from', b'', b'rev to merge from'),
1491 (b'', b'base', b'', b'the revision to use as base'),
1496 (b'', b'base', b'', b'the revision to use as base'),
1492 ]
1497 ]
1493 + formatteropts,
1498 + formatteropts,
1494 )
1499 )
1495 def perfmergecalculate(ui, repo, **opts):
1500 def perfmergecalculate(ui, repo, **opts):
1496 opts = _byteskwargs(opts)
1501 opts = _byteskwargs(opts)
1497 timer, fm = gettimer(ui, opts)
1502 timer, fm = gettimer(ui, opts)
1498
1503
1499 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1504 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1500
1505
1501 def d():
1506 def d():
1502 # acceptremote is True because we don't want prompts in the middle of
1507 # acceptremote is True because we don't want prompts in the middle of
1503 # our benchmark
1508 # our benchmark
1504 merge.calculateupdates(
1509 merge.calculateupdates(
1505 repo,
1510 repo,
1506 wctx,
1511 wctx,
1507 rctx,
1512 rctx,
1508 [ancestor],
1513 [ancestor],
1509 branchmerge=False,
1514 branchmerge=False,
1510 force=False,
1515 force=False,
1511 acceptremote=True,
1516 acceptremote=True,
1512 followcopies=True,
1517 followcopies=True,
1513 )
1518 )
1514
1519
1515 timer(d)
1520 timer(d)
1516 fm.end()
1521 fm.end()
1517
1522
1518
1523
1519 @command(
1524 @command(
1520 b'perf::mergecopies|perfmergecopies',
1525 b'perf::mergecopies|perfmergecopies',
1521 [
1526 [
1522 (b'r', b'rev', b'.', b'rev to merge against'),
1527 (b'r', b'rev', b'.', b'rev to merge against'),
1523 (b'', b'from', b'', b'rev to merge from'),
1528 (b'', b'from', b'', b'rev to merge from'),
1524 (b'', b'base', b'', b'the revision to use as base'),
1529 (b'', b'base', b'', b'the revision to use as base'),
1525 ]
1530 ]
1526 + formatteropts,
1531 + formatteropts,
1527 )
1532 )
1528 def perfmergecopies(ui, repo, **opts):
1533 def perfmergecopies(ui, repo, **opts):
1529 """measure runtime of `copies.mergecopies`"""
1534 """measure runtime of `copies.mergecopies`"""
1530 opts = _byteskwargs(opts)
1535 opts = _byteskwargs(opts)
1531 timer, fm = gettimer(ui, opts)
1536 timer, fm = gettimer(ui, opts)
1532 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1537 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1533
1538
1534 def d():
1539 def d():
1535 # acceptremote is True because we don't want prompts in the middle of
1540 # acceptremote is True because we don't want prompts in the middle of
1536 # our benchmark
1541 # our benchmark
1537 copies.mergecopies(repo, wctx, rctx, ancestor)
1542 copies.mergecopies(repo, wctx, rctx, ancestor)
1538
1543
1539 timer(d)
1544 timer(d)
1540 fm.end()
1545 fm.end()
1541
1546
1542
1547
1543 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1548 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1544 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1549 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1545 """benchmark the copy tracing logic"""
1550 """benchmark the copy tracing logic"""
1546 opts = _byteskwargs(opts)
1551 opts = _byteskwargs(opts)
1547 timer, fm = gettimer(ui, opts)
1552 timer, fm = gettimer(ui, opts)
1548 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1553 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1549 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1554 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1550
1555
1551 def d():
1556 def d():
1552 copies.pathcopies(ctx1, ctx2)
1557 copies.pathcopies(ctx1, ctx2)
1553
1558
1554 timer(d)
1559 timer(d)
1555 fm.end()
1560 fm.end()
1556
1561
1557
1562
1558 @command(
1563 @command(
1559 b'perf::phases|perfphases',
1564 b'perf::phases|perfphases',
1560 [
1565 [
1561 (b'', b'full', False, b'include file reading time too'),
1566 (b'', b'full', False, b'include file reading time too'),
1562 ],
1567 ],
1563 b"",
1568 b"",
1564 )
1569 )
1565 def perfphases(ui, repo, **opts):
1570 def perfphases(ui, repo, **opts):
1566 """benchmark phasesets computation"""
1571 """benchmark phasesets computation"""
1567 opts = _byteskwargs(opts)
1572 opts = _byteskwargs(opts)
1568 timer, fm = gettimer(ui, opts)
1573 timer, fm = gettimer(ui, opts)
1569 _phases = repo._phasecache
1574 _phases = repo._phasecache
1570 full = opts.get(b'full')
1575 full = opts.get(b'full')
1571
1576
1572 def d():
1577 def d():
1573 phases = _phases
1578 phases = _phases
1574 if full:
1579 if full:
1575 clearfilecache(repo, b'_phasecache')
1580 clearfilecache(repo, b'_phasecache')
1576 phases = repo._phasecache
1581 phases = repo._phasecache
1577 phases.invalidate()
1582 phases.invalidate()
1578 phases.loadphaserevs(repo)
1583 phases.loadphaserevs(repo)
1579
1584
1580 timer(d)
1585 timer(d)
1581 fm.end()
1586 fm.end()
1582
1587
1583
1588
1584 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1589 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1585 def perfphasesremote(ui, repo, dest=None, **opts):
1590 def perfphasesremote(ui, repo, dest=None, **opts):
1586 """benchmark time needed to analyse phases of the remote server"""
1591 """benchmark time needed to analyse phases of the remote server"""
1587 from mercurial.node import bin
1592 from mercurial.node import bin
1588 from mercurial import (
1593 from mercurial import (
1589 exchange,
1594 exchange,
1590 hg,
1595 hg,
1591 phases,
1596 phases,
1592 )
1597 )
1593
1598
1594 opts = _byteskwargs(opts)
1599 opts = _byteskwargs(opts)
1595 timer, fm = gettimer(ui, opts)
1600 timer, fm = gettimer(ui, opts)
1596
1601
1597 path = ui.getpath(dest, default=(b'default-push', b'default'))
1602 path = ui.getpath(dest, default=(b'default-push', b'default'))
1598 if not path:
1603 if not path:
1599 raise error.Abort(
1604 raise error.Abort(
1600 b'default repository not configured!',
1605 b'default repository not configured!',
1601 hint=b"see 'hg help config.paths'",
1606 hint=b"see 'hg help config.paths'",
1602 )
1607 )
1603 dest = path.pushloc or path.loc
1608 dest = path.pushloc or path.loc
1604 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1609 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1605 other = hg.peer(repo, opts, dest)
1610 other = hg.peer(repo, opts, dest)
1606
1611
1607 # easier to perform discovery through the operation
1612 # easier to perform discovery through the operation
1608 op = exchange.pushoperation(repo, other)
1613 op = exchange.pushoperation(repo, other)
1609 exchange._pushdiscoverychangeset(op)
1614 exchange._pushdiscoverychangeset(op)
1610
1615
1611 remotesubset = op.fallbackheads
1616 remotesubset = op.fallbackheads
1612
1617
1613 with other.commandexecutor() as e:
1618 with other.commandexecutor() as e:
1614 remotephases = e.callcommand(
1619 remotephases = e.callcommand(
1615 b'listkeys', {b'namespace': b'phases'}
1620 b'listkeys', {b'namespace': b'phases'}
1616 ).result()
1621 ).result()
1617 del other
1622 del other
1618 publishing = remotephases.get(b'publishing', False)
1623 publishing = remotephases.get(b'publishing', False)
1619 if publishing:
1624 if publishing:
1620 ui.statusnoi18n(b'publishing: yes\n')
1625 ui.statusnoi18n(b'publishing: yes\n')
1621 else:
1626 else:
1622 ui.statusnoi18n(b'publishing: no\n')
1627 ui.statusnoi18n(b'publishing: no\n')
1623
1628
1624 has_node = getattr(repo.changelog.index, 'has_node', None)
1629 has_node = getattr(repo.changelog.index, 'has_node', None)
1625 if has_node is None:
1630 if has_node is None:
1626 has_node = repo.changelog.nodemap.__contains__
1631 has_node = repo.changelog.nodemap.__contains__
1627 nonpublishroots = 0
1632 nonpublishroots = 0
1628 for nhex, phase in remotephases.iteritems():
1633 for nhex, phase in remotephases.iteritems():
1629 if nhex == b'publishing': # ignore data related to publish option
1634 if nhex == b'publishing': # ignore data related to publish option
1630 continue
1635 continue
1631 node = bin(nhex)
1636 node = bin(nhex)
1632 if has_node(node) and int(phase):
1637 if has_node(node) and int(phase):
1633 nonpublishroots += 1
1638 nonpublishroots += 1
1634 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1639 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1635 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1640 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1636
1641
1637 def d():
1642 def d():
1638 phases.remotephasessummary(repo, remotesubset, remotephases)
1643 phases.remotephasessummary(repo, remotesubset, remotephases)
1639
1644
1640 timer(d)
1645 timer(d)
1641 fm.end()
1646 fm.end()
1642
1647
1643
1648
1644 @command(
1649 @command(
1645 b'perf::manifest|perfmanifest',
1650 b'perf::manifest|perfmanifest',
1646 [
1651 [
1647 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1652 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1648 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1653 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1649 ]
1654 ]
1650 + formatteropts,
1655 + formatteropts,
1651 b'REV|NODE',
1656 b'REV|NODE',
1652 )
1657 )
1653 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1658 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1654 """benchmark the time to read a manifest from disk and return a usable
1659 """benchmark the time to read a manifest from disk and return a usable
1655 dict-like object
1660 dict-like object
1656
1661
1657 Manifest caches are cleared before retrieval."""
1662 Manifest caches are cleared before retrieval."""
1658 opts = _byteskwargs(opts)
1663 opts = _byteskwargs(opts)
1659 timer, fm = gettimer(ui, opts)
1664 timer, fm = gettimer(ui, opts)
1660 if not manifest_rev:
1665 if not manifest_rev:
1661 ctx = scmutil.revsingle(repo, rev, rev)
1666 ctx = scmutil.revsingle(repo, rev, rev)
1662 t = ctx.manifestnode()
1667 t = ctx.manifestnode()
1663 else:
1668 else:
1664 from mercurial.node import bin
1669 from mercurial.node import bin
1665
1670
1666 if len(rev) == 40:
1671 if len(rev) == 40:
1667 t = bin(rev)
1672 t = bin(rev)
1668 else:
1673 else:
1669 try:
1674 try:
1670 rev = int(rev)
1675 rev = int(rev)
1671
1676
1672 if util.safehasattr(repo.manifestlog, b'getstorage'):
1677 if util.safehasattr(repo.manifestlog, b'getstorage'):
1673 t = repo.manifestlog.getstorage(b'').node(rev)
1678 t = repo.manifestlog.getstorage(b'').node(rev)
1674 else:
1679 else:
1675 t = repo.manifestlog._revlog.lookup(rev)
1680 t = repo.manifestlog._revlog.lookup(rev)
1676 except ValueError:
1681 except ValueError:
1677 raise error.Abort(
1682 raise error.Abort(
1678 b'manifest revision must be integer or full node'
1683 b'manifest revision must be integer or full node'
1679 )
1684 )
1680
1685
1681 def d():
1686 def d():
1682 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1687 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1683 repo.manifestlog[t].read()
1688 repo.manifestlog[t].read()
1684
1689
1685 timer(d)
1690 timer(d)
1686 fm.end()
1691 fm.end()
1687
1692
1688
1693
1689 @command(b'perf::changeset|perfchangeset', formatteropts)
1694 @command(b'perf::changeset|perfchangeset', formatteropts)
1690 def perfchangeset(ui, repo, rev, **opts):
1695 def perfchangeset(ui, repo, rev, **opts):
1691 opts = _byteskwargs(opts)
1696 opts = _byteskwargs(opts)
1692 timer, fm = gettimer(ui, opts)
1697 timer, fm = gettimer(ui, opts)
1693 n = scmutil.revsingle(repo, rev).node()
1698 n = scmutil.revsingle(repo, rev).node()
1694
1699
1695 def d():
1700 def d():
1696 repo.changelog.read(n)
1701 repo.changelog.read(n)
1697 # repo.changelog._cache = None
1702 # repo.changelog._cache = None
1698
1703
1699 timer(d)
1704 timer(d)
1700 fm.end()
1705 fm.end()
1701
1706
1702
1707
1703 @command(b'perf::ignore|perfignore', formatteropts)
1708 @command(b'perf::ignore|perfignore', formatteropts)
1704 def perfignore(ui, repo, **opts):
1709 def perfignore(ui, repo, **opts):
1705 """benchmark operation related to computing ignore"""
1710 """benchmark operation related to computing ignore"""
1706 opts = _byteskwargs(opts)
1711 opts = _byteskwargs(opts)
1707 timer, fm = gettimer(ui, opts)
1712 timer, fm = gettimer(ui, opts)
1708 dirstate = repo.dirstate
1713 dirstate = repo.dirstate
1709
1714
1710 def setupone():
1715 def setupone():
1711 dirstate.invalidate()
1716 dirstate.invalidate()
1712 clearfilecache(dirstate, b'_ignore')
1717 clearfilecache(dirstate, b'_ignore')
1713
1718
1714 def runone():
1719 def runone():
1715 dirstate._ignore
1720 dirstate._ignore
1716
1721
1717 timer(runone, setup=setupone, title=b"load")
1722 timer(runone, setup=setupone, title=b"load")
1718 fm.end()
1723 fm.end()
1719
1724
1720
1725
1721 @command(
1726 @command(
1722 b'perf::index|perfindex',
1727 b'perf::index|perfindex',
1723 [
1728 [
1724 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1729 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1725 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1730 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1726 ]
1731 ]
1727 + formatteropts,
1732 + formatteropts,
1728 )
1733 )
1729 def perfindex(ui, repo, **opts):
1734 def perfindex(ui, repo, **opts):
1730 """benchmark index creation time followed by a lookup
1735 """benchmark index creation time followed by a lookup
1731
1736
1732 The default is to look `tip` up. Depending on the index implementation,
1737 The default is to look `tip` up. Depending on the index implementation,
1733 the revision looked up can matters. For example, an implementation
1738 the revision looked up can matters. For example, an implementation
1734 scanning the index will have a faster lookup time for `--rev tip` than for
1739 scanning the index will have a faster lookup time for `--rev tip` than for
1735 `--rev 0`. The number of looked up revisions and their order can also
1740 `--rev 0`. The number of looked up revisions and their order can also
1736 matters.
1741 matters.
1737
1742
1738 Example of useful set to test:
1743 Example of useful set to test:
1739
1744
1740 * tip
1745 * tip
1741 * 0
1746 * 0
1742 * -10:
1747 * -10:
1743 * :10
1748 * :10
1744 * -10: + :10
1749 * -10: + :10
1745 * :10: + -10:
1750 * :10: + -10:
1746 * -10000:
1751 * -10000:
1747 * -10000: + 0
1752 * -10000: + 0
1748
1753
1749 It is not currently possible to check for lookup of a missing node. For
1754 It is not currently possible to check for lookup of a missing node. For
1750 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1755 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1751 import mercurial.revlog
1756 import mercurial.revlog
1752
1757
1753 opts = _byteskwargs(opts)
1758 opts = _byteskwargs(opts)
1754 timer, fm = gettimer(ui, opts)
1759 timer, fm = gettimer(ui, opts)
1755 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1760 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1756 if opts[b'no_lookup']:
1761 if opts[b'no_lookup']:
1757 if opts['rev']:
1762 if opts['rev']:
1758 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1763 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1759 nodes = []
1764 nodes = []
1760 elif not opts[b'rev']:
1765 elif not opts[b'rev']:
1761 nodes = [repo[b"tip"].node()]
1766 nodes = [repo[b"tip"].node()]
1762 else:
1767 else:
1763 revs = scmutil.revrange(repo, opts[b'rev'])
1768 revs = scmutil.revrange(repo, opts[b'rev'])
1764 cl = repo.changelog
1769 cl = repo.changelog
1765 nodes = [cl.node(r) for r in revs]
1770 nodes = [cl.node(r) for r in revs]
1766
1771
1767 unfi = repo.unfiltered()
1772 unfi = repo.unfiltered()
1768 # find the filecache func directly
1773 # find the filecache func directly
1769 # This avoid polluting the benchmark with the filecache logic
1774 # This avoid polluting the benchmark with the filecache logic
1770 makecl = unfi.__class__.changelog.func
1775 makecl = unfi.__class__.changelog.func
1771
1776
1772 def setup():
1777 def setup():
1773 # probably not necessary, but for good measure
1778 # probably not necessary, but for good measure
1774 clearchangelog(unfi)
1779 clearchangelog(unfi)
1775
1780
1776 def d():
1781 def d():
1777 cl = makecl(unfi)
1782 cl = makecl(unfi)
1778 for n in nodes:
1783 for n in nodes:
1779 cl.rev(n)
1784 cl.rev(n)
1780
1785
1781 timer(d, setup=setup)
1786 timer(d, setup=setup)
1782 fm.end()
1787 fm.end()
1783
1788
1784
1789
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            # modern index API (rust/c nodemap)
            nodeget[0] = cl.index.get_rev
        else:
            # older Mercurial: plain nodemap dict
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1855
1860
1856
1861
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup cost of running `hg version` in a subprocess"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # no /dev/null on Windows; also clear HGRCPATH via the environment
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1873
1878
1874
1879
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1900
1905
1901
1906
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing a changectx's file list (through the context API)"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()
1913
1918
1914
1919
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading a revision's file list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        # changelog.read() returns a tuple; index 3 is the file list
        len(cl.read(x)[3])

    timer(d)
    fm.end()
1927
1932
1928
1933
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # len() forces the lookup result to be realized inside the timed call
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1935
1940
1936
1941
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a fixed pseudo-random sequence of linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run benchmarks the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1974
1979
1975
1980
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specs with scmutil.revrange"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind once outside the timed closure to avoid attribute-lookup noise
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1983
1988
1984
1989
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node -> rev lookup on a freshly-built (cold-cache) revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    try:
        # modern revlog constructor (radix-based)
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        # fall back to the pre-radix constructor on older Mercurial
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # drop caches so each timed iteration is a cold lookup
        clearcaches(cl)

    timer(d)
    fm.end()
2005
2010
2006
2011
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log` (output is swallowed into a ui buffer)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
2024
2029
2025
2030
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
2042
2047
2043
2048
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a silenced ui so output cost is only template evaluation
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
2086
2091
2087
2092
def _displaystats(ui, opts, entries, data):
    """display percentile statistics for collected measurements

    ``entries`` is a list of ``(key, title)`` pairs and ``data`` maps each
    ``key`` to a sorted-able list of tuples whose first element is the
    measured value.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # BUGFIX: this previously read `len(data)` (the number of stat
        # categories), so every percentile indexed near values[0] and could
        # even IndexError; percentiles must use the number of samples.
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2132
2137
2133
2138
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the columns that are only filled when --timing runs
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # revisions of interest: merges within the requested set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            # NOTE(review): the dict above uses bytes keys while the lookups
            # below use str keys — preserved as-is from the original; verify
            # against upstream before changing.
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2315
2320
2316
2321
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # Pick the table layout up front: the timing variant carries two extra
    # columns (rename count and elapsed time).
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    # Default to scanning the whole repository when no revset was given.
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # Only merge revisions have two parents and are therefore interesting
    # source/destination candidates for copy tracing.
    roi = repo.revs('merge() and %ld', revs)
    for rev in roi:
        ctx = repo[rev]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace between this base/parent pair
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2455
2460
2456
2461
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """Benchmark constructing a case-collision auditor over the dirstate."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build_auditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2463
2468
2464
2469
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """Benchmark loading the fncache file from the store."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
2476
2481
2477
2482
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """Benchmark writing the fncache file inside a transaction."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def flush():
        # Force the dirty flag each round so write() actually does work.
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(flush)
    tr.close()
    lock.release()
    fm.end()
2496
2501
2497
2502
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """Benchmark encoding every path currently tracked by the fncache."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encode_all():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encode_all)
    fm.end()
2511
2516
2512
2517
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop for threaded bdiff benchmarking.

    Pulls text pairs off *q* and diffs them until a ``None`` sentinel is
    seen, then parks on the *ready* condition until the driver wakes all
    workers for the next round (or sets *done* to terminate).
    """
    while not done.is_set():
        job = q.get()
        while job is not None:
            # Mirror the main-thread benchmark: pick the requested diff flavor.
            if xdiff:
                mdiff.bdiff.xdiffblocks(*job)
            elif blocks:
                mdiff.bdiff.blocks(*job)
            else:
                mdiff.textdiff(*job)
            q.task_done()
            job = q.get()
        q.task_done()  # account for the sentinel itself
        with ready:
            ready.wait()
2528
2533
2529
2534
def _manifestrevision(repo, mnode):
    """Return the raw revision text for manifest node ``mnode``."""
    ml = repo.manifestlog
    # Modern manifestlog exposes getstorage(); fall back to the private
    # _revlog attribute on older Mercurial versions.
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2539
2544
2540
2545
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # With -c/-m the first positional argument is actually the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    rl = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = rl.rev(rl.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(rl) - 1)):
        if opts[b'alldata']:
            # Collect the manifest text of the changeset against each parent...
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # ...then the old/new text of every file touched, found by
            # iterating the manifest delta against p1.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = rl.deltaparent(rev)
            textpairs.append((rl.revision(dp), rl.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # Single-threaded variant: diff each pair inline.
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # Threaded variant: start workers, prime the queue with one sentinel
        # per worker, and wait for them to settle before timing.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # Feed all pairs, then one sentinel per worker, wake everyone up
            # and wait until the queue has been drained.
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Tear down the worker threads.
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2655
2660
2656
2661
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""
    # imported locally so the extension can be loaded without a full
    # mercurial installation at module-import time
    from mercurial import exchange
    from mercurial import bundle2

    opts = _byteskwargs(opts)

    with repo.lock():
        # bundle holds [generator, transaction] for the current run; a list
        # so the setup/apply closures can rebind both slots in place.
        bundle = [None, None]
        orig_quiet = repo.ui.quiet
        try:
            repo.ui.quiet = True
            with open(fname, mode="rb") as f:

                def noop_report(*args, **kwargs):
                    pass

                def setup():
                    # Roll back whatever the previous iteration applied and
                    # rewind the bundle file for a fresh read.
                    gen, tr = bundle
                    if tr is not None:
                        tr.abort()
                    bundle[:] = [None, None]
                    f.seek(0)
                    bundle[0] = exchange.readbundle(ui, f, fname)
                    bundle[1] = repo.transaction(b'perf::unbundle')
                    bundle[1]._report = noop_report  # silence the transaction

                def apply():
                    gen, tr = bundle
                    bundle2.applybundle(
                        repo,
                        gen,
                        tr,
                        source=b'perf::unbundle',
                        url=fname,
                    )

                timer, fm = gettimer(ui, opts)
                timer(apply, setup=setup)
                fm.end()
        finally:
            # BUG FIX: this line previously read `repo.ui.quiet == orig_quiet`
            # (a no-op comparison), so the original verbosity was never
            # restored; it must be an assignment.
            repo.ui.quiet = orig_quiet
            # make sure the last iteration's transaction is rolled back too
            gen, tr = bundle
            if tr is not None:
                tr.abort()
2709
2714
2710
2715
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # With -c/-m the first positional argument is actually the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    rl = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = rl.rev(rl.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(rl) - 1)):
        if opts[b'alldata']:
            # Collect the manifest text of the changeset against each parent...
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # ...then the old/new text of every file touched, found by
            # iterating the manifest delta against p1.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = rl.deltaparent(rev)
            textpairs.append((rl.revision(dp), rl.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2789
2794
2790
2795
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter flag -> commands.diff keyword argument
    flag_names = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # Time `hg diff` under each whitespace-option combination.
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diff_kwargs = {flag_names[c]: b'1' for c in diffopt}

        def run_diff():
            ui.pushbuffer()
            commands.diff(ui, repo, **diff_kwargs)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(run_diff, title=title)
    fm.end()
2814
2819
2815
2820
2816 @command(
2821 @command(
2817 b'perf::revlogindex|perfrevlogindex',
2822 b'perf::revlogindex|perfrevlogindex',
2818 revlogopts + formatteropts,
2823 revlogopts + formatteropts,
2819 b'-c|-m|FILE',
2824 b'-c|-m|FILE',
2820 )
2825 )
2821 def perfrevlogindex(ui, repo, file_=None, **opts):
2826 def perfrevlogindex(ui, repo, file_=None, **opts):
2822 """Benchmark operations against a revlog index.
2827 """Benchmark operations against a revlog index.
2823
2828
2824 This tests constructing a revlog instance, reading index data,
2829 This tests constructing a revlog instance, reading index data,
2825 parsing index data, and performing various operations related to
2830 parsing index data, and performing various operations related to
2826 index data.
2831 index data.
2827 """
2832 """
2828
2833
2829 opts = _byteskwargs(opts)
2834 opts = _byteskwargs(opts)
2830
2835
2831 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2836 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2832
2837
2833 opener = getattr(rl, 'opener') # trick linter
2838 opener = getattr(rl, 'opener') # trick linter
2834 # compat with hg <= 5.8
2839 # compat with hg <= 5.8
2835 radix = getattr(rl, 'radix', None)
2840 radix = getattr(rl, 'radix', None)
2836 indexfile = getattr(rl, '_indexfile', None)
2841 indexfile = getattr(rl, '_indexfile', None)
2837 if indexfile is None:
2842 if indexfile is None:
2838 # compatibility with <= hg-5.8
2843 # compatibility with <= hg-5.8
2839 indexfile = getattr(rl, 'indexfile')
2844 indexfile = getattr(rl, 'indexfile')
2840 data = opener.read(indexfile)
2845 data = opener.read(indexfile)
2841
2846
2842 header = struct.unpack(b'>I', data[0:4])[0]
2847 header = struct.unpack(b'>I', data[0:4])[0]
2843 version = header & 0xFFFF
2848 version = header & 0xFFFF
2844 if version == 1:
2849 if version == 1:
2845 inline = header & (1 << 16)
2850 inline = header & (1 << 16)
2846 else:
2851 else:
2847 raise error.Abort(b'unsupported revlog version: %d' % version)
2852 raise error.Abort(b'unsupported revlog version: %d' % version)
2848
2853
2849 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2854 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2850 if parse_index_v1 is None:
2855 if parse_index_v1 is None:
2851 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2856 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2852
2857
2853 rllen = len(rl)
2858 rllen = len(rl)
2854
2859
2855 node0 = rl.node(0)
2860 node0 = rl.node(0)
2856 node25 = rl.node(rllen // 4)
2861 node25 = rl.node(rllen // 4)
2857 node50 = rl.node(rllen // 2)
2862 node50 = rl.node(rllen // 2)
2858 node75 = rl.node(rllen // 4 * 3)
2863 node75 = rl.node(rllen // 4 * 3)
2859 node100 = rl.node(rllen - 1)
2864 node100 = rl.node(rllen - 1)
2860
2865
2861 allrevs = range(rllen)
2866 allrevs = range(rllen)
2862 allrevsrev = list(reversed(allrevs))
2867 allrevsrev = list(reversed(allrevs))
2863 allnodes = [rl.node(rev) for rev in range(rllen)]
2868 allnodes = [rl.node(rev) for rev in range(rllen)]
2864 allnodesrev = list(reversed(allnodes))
2869 allnodesrev = list(reversed(allnodes))
2865
2870
2866 def constructor():
2871 def constructor():
2867 if radix is not None:
2872 if radix is not None:
2868 revlog(opener, radix=radix)
2873 revlog(opener, radix=radix)
2869 else:
2874 else:
2870 # hg <= 5.8
2875 # hg <= 5.8
2871 revlog(opener, indexfile=indexfile)
2876 revlog(opener, indexfile=indexfile)
2872
2877
2873 def read():
2878 def read():
2874 with opener(indexfile) as fh:
2879 with opener(indexfile) as fh:
2875 fh.read()
2880 fh.read()
2876
2881
2877 def parseindex():
2882 def parseindex():
2878 parse_index_v1(data, inline)
2883 parse_index_v1(data, inline)
2879
2884
2880 def getentry(revornode):
2885 def getentry(revornode):
2881 index = parse_index_v1(data, inline)[0]
2886 index = parse_index_v1(data, inline)[0]
2882 index[revornode]
2887 index[revornode]
2883
2888
2884 def getentries(revs, count=1):
2889 def getentries(revs, count=1):
2885 index = parse_index_v1(data, inline)[0]
2890 index = parse_index_v1(data, inline)[0]
2886
2891
2887 for i in range(count):
2892 for i in range(count):
2888 for rev in revs:
2893 for rev in revs:
2889 index[rev]
2894 index[rev]
2890
2895
2891 def resolvenode(node):
2896 def resolvenode(node):
2892 index = parse_index_v1(data, inline)[0]
2897 index = parse_index_v1(data, inline)[0]
2893 rev = getattr(index, 'rev', None)
2898 rev = getattr(index, 'rev', None)
2894 if rev is None:
2899 if rev is None:
2895 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2900 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2896 # This only works for the C code.
2901 # This only works for the C code.
2897 if nodemap is None:
2902 if nodemap is None:
2898 return
2903 return
2899 rev = nodemap.__getitem__
2904 rev = nodemap.__getitem__
2900
2905
2901 try:
2906 try:
2902 rev(node)
2907 rev(node)
2903 except error.RevlogError:
2908 except error.RevlogError:
2904 pass
2909 pass
2905
2910
def resolvenodes(nodes, count=1):
    """Benchmark: resolve every node in ``nodes`` to a revision.

    The walk is repeated ``count`` times; a 2x pass exposes caching
    effects. Lookup failures are swallowed, matching ``resolvenode``.
    """
    index = parse_index_v1(data, inline)[0]
    lookup = getattr(index, 'rev', None)
    if lookup is None:
        # Fall back to the nodemap mapping; this only works for the C code.
        nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
        if nodemap is None:
            return
        lookup = nodemap.__getitem__

    for _ in range(count):
        for node in nodes:
            try:
                lookup(node)
            except error.RevlogError:
                pass
# (callable, human-readable title) pairs; each scenario is timed separately.
benches = [
    (constructor, b'revlog constructor'),
    (read, b'read'),
    (parseindex, b'create index object'),
    (lambda: getentry(0), b'retrieve index entry for rev 0'),
    (lambda: resolvenode(b'a' * 20), b'look up missing node'),
    (lambda: resolvenode(node0), b'look up node at rev 0'),
    (lambda: resolvenode(node25), b'look up node at 1/4 len'),
    (lambda: resolvenode(node50), b'look up node at 1/2 len'),
    (lambda: resolvenode(node75), b'look up node at 3/4 len'),
    (lambda: resolvenode(node100), b'look up node at tip'),
    # 2x variation is to measure caching impact.
    (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
    (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
    (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
    (
        lambda: resolvenodes(allnodesrev, 2),
        b'look up all nodes 2x (reverse)',
    ),
    (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
    (
        lambda: getentries(allrevs, 2),
        b'retrieve all index entries 2x (forward)',
    ),
    (
        lambda: getentries(allrevsrev),
        b'retrieve all index entries (reverse)',
    ),
    (
        lambda: getentries(allrevsrev, 2),
        b'retrieve all index entries 2x (reverse)',
    ),
]

# Run each scenario with a fresh timer/formatter pair.
for bench, label in benches:
    timer, fm = gettimer(ui, opts)
    timer(bench, title=label)
    fm.end()
2961
2966
2962
2967
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # A negative start revision counts back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # Clear caches so every run measures a cold read.
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            first, last = last - 1, first - 1
            step = -1 * step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3011
3016
3012
3017
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # BUGFIX: message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUGFIX: the 50th percentile was computed with ``* 70 // 100``,
        # reporting the 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3154
3159
3155
3160
3156 class _faketr:
3161 class _faketr:
3157 def add(s, x, y, z=None):
3162 def add(s, x, y, z=None):
3158 return None
3163 return None
3159
3164
3160
3165
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions ``startrev``..``stoprev`` of ``orig`` into a
    scratch revlog, timing each ``addrawrevision`` call individually.

    Returns a list of ``(rev, timing)`` pairs, one per replayed revision.
    """
    measurements = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for pos, rev in enumerate(revs):
            updateprogress(pos)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # make every write start from a cold state
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            measurements.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return measurements
3210
3215
3211
3216
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for ``addrawrevision`` that
    replays revision ``rev`` of ``orig`` using the ``source`` strategy
    (full text, a parent delta, or the stored delta)."""
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        base = orig.rev(p1)
        cachedelta = (base, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        base = orig.rev(parent)
        cachedelta = (base, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent, delta = p1, p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent, delta = p2, p2diff
        base = orig.rev(parent)
        cachedelta = (base, delta)
    elif source == b'storage':
        base = orig.deltaparent(rev)
        cachedelta = (base, orig.revdiff(orig.node(base), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3252
3257
3253
3258
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable scratch copy of revlog ``orig`` truncated just
    before ``truncaterev``.

    The copy lives in a temporary directory that is removed when the
    context exits, whatever happens inside it.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        newindexpath = os.path.join(tmpdir, 'revlog.i')
        newdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, newindexpath)
        shutil.copyfile(origdatapath, newdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(newindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(newdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern API takes a radix...
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # ... older versions want explicit file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3314
3319
3315
3320
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Return an open file handle on the file that actually stores the
        # revlog data (the index when inline, the data file otherwise).
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # BUGFIX: this lookup previously read
            # ``getattr(rl, 'datafile', getattr(rl, 'datafile'))`` — the
            # modern ``_datafile`` attribute was never consulted. Mirror the
            # modern-first fallback used for the index file above and in
            # ``_temprevlog``.
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3448
3453
3449
3454
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m the positional FILE slot actually carries the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        """Slice raw segment data back into one still-compressed chunk per rev.

        ``data`` holds one raw byte segment per slice of ``chain``; offsets are
        computed relative to the first rev of each slice.
        """
        # Hoist method lookups out of the loop (hot path).
        start = r.start
        length = r.length
        inline = r._inline
        # entry_size moved onto the parsed index in newer Mercurial versions.
        try:
            iosize = r.index.entry_size
        except AttributeError:
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # Inline revlogs interleave index entries with data;
                    # skip one index entry per preceding rev.
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                # util.buffer gives a zero-copy view into the segment.
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # One helper per benchmark phase. Each clears the revlog caches first
    # (unless --cache was given) so every run measures cold-state work.
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved modules over time; fall back for older versions.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the input of every phase once, so each benchmark below
    # only measures its own step rather than the preceding ones.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # The slicing phase only exists when sparse-read is enabled.
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3594
3599
3595
3600
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. The volatile
    caches hold filtered and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        # Optionally drop the volatile caches so each run rebuilds them.
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # Also pay the cost of changectx creation for each revision.
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3627
3632
3628
3633
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, setname):
        """Build a benchmark closure recomputing one volatile set from scratch."""

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)

        return run

    # Obsolescence-related sets first, in sorted order.
    obs_names = sorted(obsolete.cachefuncs)
    if names:
        obs_names = [n for n in obs_names if n in names]
    for setname in obs_names:
        timer(makebench(obsolete.getrevs, setname), title=setname)

    # Then the filtered-revision sets, also in sorted order.
    filter_names = sorted(repoview.filtertable)
    if names:
        filter_names = [n for n in filter_names if n in names]
    for setname in filter_names:
        timer(makebench(repoview.filterrevs, setname), title=setname)
    fm.end()
3676
3681
3677
3682
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        # The per-filter cache container moved in newer Mercurial versions.
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # --full: drop every cached branchmap, forcing a full rebuild.
                view._branchcaches.clear()
            else:
                # Only drop this filter's entry so the benchmark measures an
                # incremental update on top of the (warmed) subset caches.
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # Order filters so each one's subset (if any) is processed before it;
    # for/else fires only when no loop-free candidate exists (a cycle).
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap reads and writes so only the in-memory
    # computation is timed; restored in the finally block below.
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3767
3772
3768
3773
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # The revisions whose addition to the branchmap will be benchmarked.
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # Custom repoview filters exposing exactly the base/target subsets.
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # Register the temporary filters; removed again in the finally block.
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                # Complete the reused branchmap up to the base revisions.
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # Start each run from a fresh copy of the base branchmap.
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3877
3882
3878
3883
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    # --list mode: just print available on-disk branchmap caches and exit.
    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    # branchcache.fromfile replaced branchmap.read in newer versions.
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # Walk up the subset chain looking for a filter that has a cache.
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3937
3942
3938
3943
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def parsemarkers():
        # Instantiating the obsstore parses the on-disk markers; its length
        # (the marker count) becomes the reported benchmark result.
        return len(obsolete.obsstore(repo, storevfs))

    timer(parsemarkers)
    fm.end()
3948
3953
3949
3954
3950 @command(
3955 @command(
3951 b'perf::lrucachedict|perflrucachedict',
3956 b'perf::lrucachedict|perflrucachedict',
3952 formatteropts
3957 formatteropts
3953 + [
3958 + [
3954 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3959 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3955 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3960 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3956 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3961 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3957 (b'', b'size', 4, b'size of cache'),
3962 (b'', b'size', 4, b'size of cache'),
3958 (b'', b'gets', 10000, b'number of key lookups'),
3963 (b'', b'gets', 10000, b'number of key lookups'),
3959 (b'', b'sets', 10000, b'number of key sets'),
3964 (b'', b'sets', 10000, b'number of key sets'),
3960 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3965 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3961 (
3966 (
3962 b'',
3967 b'',
3963 b'mixedgetfreq',
3968 b'mixedgetfreq',
3964 50,
3969 50,
3965 b'frequency of get vs set ops in mixed mode',
3970 b'frequency of get vs set ops in mixed mode',
3966 ),
3971 ),
3967 ],
3972 ],
3968 norepo=True,
3973 norepo=True,
3969 )
3974 )
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """Benchmark the util.lrucachedict implementation.

    Runs an ``init`` benchmark plus either the plain get/insert/set/mixed
    benchmarks, or — when ``--costlimit`` is non-zero — their cost-aware
    variants that exercise ``lrucachedict(maxcost=...)``.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # Raw construction cost of the cache object itself.
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # ``costs`` is populated below (at function-definition time it does
        # not yet exist); this closure is only called after setup completes.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # cost-based eviction may have dropped the key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4104
4109
4105
4110
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # Resolve the ui method to benchmark (e.g. ui.write) by name.
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # Pre-build the full line once so the benchmark measures only the
        # write call, not string construction.
        line = item * nitems + b'\n'

    def benchmark():
        for i in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for i in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4147
4152
4148
4153
def uisetup(ui):
    """Extension setup hook: patch old Mercurial versions for portability."""
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4167
4172
4168
4173
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # Drive a progress bar through ``total`` increments; the context
        # manager completes/clears the bar on exit.
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now