perf: make perf::bundle compatible down to 5.2...
marmoute - r50369:d513ae93 default
@@ -1,4195 +1,4203 @@
1 # perf.py - performance test routines
2 '''helper extension to measure performance
3
4 Configurations
5 ==============
6
7 ``perf``
8 --------
9
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
13 (default: off).
14
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
17
18 ``pre-run``
19 number of run to perform before starting measurement.
20
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
24
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
29
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
32
33 The default value is: `3.0-100, 10.0-3`
34
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
38 '''
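For illustration, the options documented above are ordinary `[perf]` settings, so a hypothetical hgrc fragment exercising them could look like the snippet below; the extension path is an assumption (perf.py normally lives in a Mercurial checkout's contrib/ directory), and the chosen values are examples only:

[extensions]
perf = /path/to/mercurial/contrib/perf.py

[perf]
all-timing = yes
presleep = 0
run-limits = 5.0-50, 30.0-5
profile-benchmark = no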
39
40 # "historical portability" policy of perf.py:
41 #
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
48 #
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
52 #
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
56
57 import contextlib
58 import functools
59 import gc
60 import os
61 import random
62 import shutil
63 import struct
64 import sys
65 import tempfile
66 import threading
67 import time
68
69 import mercurial.revlog
70 from mercurial import (
71 changegroup,
72 cmdutil,
73 commands,
74 copies,
75 error,
76 extensions,
77 hg,
78 mdiff,
79 merge,
80 util,
81 )
82
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
89 pass
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
93 pass
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
97 dir(registrar) # forcibly load it
98 except ImportError:
99 registrar = None
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
103 pass
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
107 repoviewutil = None
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
111 pass
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
115 pass
116
117 try:
118 from mercurial import profiling
119 except ImportError:
120 profiling = None
121
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
130
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
133
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
137
138 def identity(a):
139 return a
140
141
142 try:
143 from mercurial import pycompat
144
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
156 import inspect
157
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
160 _bytestr = str
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
165
166 try:
167 # 4.7+
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
171 try:
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
175
176 try:
177 from mercurial import logcmdutil
178
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
184 makelogtemplater = None
185
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
190
191
192 def safehasattr(thing, attr):
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194
195
196 setattr(util, 'safehasattr', safehasattr)
197
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
204 util.timer = time.clock
205 else:
206 util.timer = time.time
207
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
216
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
223 cmdutil,
224 "debugrevlogopts",
225 getattr(
226 commands,
227 "debugrevlogopts",
228 [
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
233 ),
234 )
235
236 cmdtable = {}
237
238 # for "historical portability":
239 # define parsealiases locally, because cmdutil.parsealiases has been
240 # available since 1.5 (or 6252852b4332)
241 def parsealiases(cmd):
242 return cmd.split(b"|")
243
244
245 if safehasattr(registrar, 'command'):
246 command = registrar.command(cmdtable)
247 elif safehasattr(cmdutil, 'command'):
248 command = cmdutil.command(cmdtable)
249 if 'norepo' not in getargspec(command).args:
250 # for "historical portability":
251 # wrap original cmdutil.command, because "norepo" option has
252 # been available since 3.1 (or 75a96326cecb)
253 _command = command
254
255 def command(name, options=(), synopsis=None, norepo=False):
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return _command(name, list(options), synopsis)
259
260
261 else:
262 # for "historical portability":
263 # define "@command" annotation locally, because cmdutil.command
264 # has been available since 1.9 (or 2daa5179e73f)
265 def command(name, options=(), synopsis=None, norepo=False):
266 def decorator(func):
267 if synopsis:
268 cmdtable[name] = func, list(options), synopsis
269 else:
270 cmdtable[name] = func, list(options)
271 if norepo:
272 commands.norepo += b' %s' % b' '.join(parsealiases(name))
273 return func
274
275 return decorator
276
277
278 try:
279 import mercurial.registrar
280 import mercurial.configitems
281
282 configtable = {}
283 configitem = mercurial.registrar.configitem(configtable)
284 configitem(
285 b'perf',
286 b'presleep',
287 default=mercurial.configitems.dynamicdefault,
288 experimental=True,
289 )
290 configitem(
291 b'perf',
292 b'stub',
293 default=mercurial.configitems.dynamicdefault,
294 experimental=True,
295 )
296 configitem(
297 b'perf',
298 b'parentscount',
299 default=mercurial.configitems.dynamicdefault,
300 experimental=True,
301 )
302 configitem(
303 b'perf',
304 b'all-timing',
305 default=mercurial.configitems.dynamicdefault,
306 experimental=True,
307 )
308 configitem(
309 b'perf',
310 b'pre-run',
311 default=mercurial.configitems.dynamicdefault,
312 )
313 configitem(
314 b'perf',
315 b'profile-benchmark',
316 default=mercurial.configitems.dynamicdefault,
317 )
318 configitem(
319 b'perf',
320 b'run-limits',
321 default=mercurial.configitems.dynamicdefault,
322 experimental=True,
323 )
324 except (ImportError, AttributeError):
325 pass
326 except TypeError:
327 # compatibility fix for a11fd395e83f
328 # hg version: 5.2
329 configitem(
330 b'perf',
331 b'presleep',
332 default=mercurial.configitems.dynamicdefault,
333 )
334 configitem(
335 b'perf',
336 b'stub',
337 default=mercurial.configitems.dynamicdefault,
338 )
339 configitem(
340 b'perf',
341 b'parentscount',
342 default=mercurial.configitems.dynamicdefault,
343 )
344 configitem(
345 b'perf',
346 b'all-timing',
347 default=mercurial.configitems.dynamicdefault,
348 )
349 configitem(
350 b'perf',
351 b'pre-run',
352 default=mercurial.configitems.dynamicdefault,
353 )
354 configitem(
355 b'perf',
356 b'profile-benchmark',
357 default=mercurial.configitems.dynamicdefault,
358 )
359 configitem(
360 b'perf',
361 b'run-limits',
362 default=mercurial.configitems.dynamicdefault,
363 )
364
365
366 def getlen(ui):
367 if ui.configbool(b"perf", b"stub", False):
368 return lambda x: 1
369 return len
370
371
372 class noop:
373 """dummy context manager"""
374
375 def __enter__(self):
376 pass
377
378 def __exit__(self, *args):
379 pass
380
381
382 NOOPCTX = noop()
383
384
385 def gettimer(ui, opts=None):
386 """return a timer function and formatter: (timer, formatter)
387
388 This function exists to gather the creation of formatter in a single
389 place instead of duplicating it in all performance commands."""
390
391 # enforce an idle period before execution to counteract power management
392 # experimental config: perf.presleep
393 time.sleep(getint(ui, b"perf", b"presleep", 1))
394
395 if opts is None:
396 opts = {}
397 # redirect all to stderr unless buffer api is in use
398 if not ui._buffers:
399 ui = ui.copy()
400 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
401 if uifout:
402 # for "historical portability":
403 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
404 uifout.set(ui.ferr)
405
406 # get a formatter
407 uiformatter = getattr(ui, 'formatter', None)
408 if uiformatter:
409 fm = uiformatter(b'perf', opts)
410 else:
411 # for "historical portability":
412 # define formatter locally, because ui.formatter has been
413 # available since 2.2 (or ae5f92e154d3)
414 from mercurial import node
415
416 class defaultformatter:
417 """Minimized composition of baseformatter and plainformatter"""
418
419 def __init__(self, ui, topic, opts):
420 self._ui = ui
421 if ui.debugflag:
422 self.hexfunc = node.hex
423 else:
424 self.hexfunc = node.short
425
426 def __nonzero__(self):
427 return False
428
429 __bool__ = __nonzero__
430
431 def startitem(self):
432 pass
433
434 def data(self, **data):
435 pass
436
437 def write(self, fields, deftext, *fielddata, **opts):
438 self._ui.write(deftext % fielddata, **opts)
439
440 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
441 if cond:
442 self._ui.write(deftext % fielddata, **opts)
443
444 def plain(self, text, **opts):
445 self._ui.write(text, **opts)
446
447 def end(self):
448 pass
449
450 fm = defaultformatter(ui, b'perf', opts)
451
452 # stub function, runs code only once instead of in a loop
453 # experimental config: perf.stub
454 if ui.configbool(b"perf", b"stub", False):
455 return functools.partial(stub_timer, fm), fm
456
457 # experimental config: perf.all-timing
458 displayall = ui.configbool(b"perf", b"all-timing", False)
459
460 # experimental config: perf.run-limits
461 limitspec = ui.configlist(b"perf", b"run-limits", [])
462 limits = []
463 for item in limitspec:
464 parts = item.split(b'-', 1)
465 if len(parts) < 2:
466 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
467 continue
468 try:
469 time_limit = float(_sysstr(parts[0]))
470 except ValueError as e:
471 ui.warn(
472 (
473 b'malformatted run limit entry, %s: %s\n'
474 % (_bytestr(e), item)
475 )
476 )
477 continue
478 try:
479 run_limit = int(_sysstr(parts[1]))
480 except ValueError as e:
481 ui.warn(
482 (
483 b'malformatted run limit entry, %s: %s\n'
484 % (_bytestr(e), item)
485 )
486 )
487 continue
488 limits.append((time_limit, run_limit))
489 if not limits:
490 limits = DEFAULTLIMITS
491
492 profiler = None
493 if profiling is not None:
494 if ui.configbool(b"perf", b"profile-benchmark", False):
495 profiler = profiling.profile(ui)
496
497 prerun = getint(ui, b"perf", b"pre-run", 0)
498 t = functools.partial(
499 _timer,
500 fm,
501 displayall=displayall,
502 limits=limits,
503 prerun=prerun,
504 profiler=profiler,
505 )
506 return t, fm
507
508
509 def stub_timer(fm, func, setup=None, title=None):
510 if setup is not None:
511 setup()
512 func()
513
514
515 @contextlib.contextmanager
516 def timeone():
517 r = []
518 ostart = os.times()
519 cstart = util.timer()
520 yield r
521 cstop = util.timer()
522 ostop = os.times()
523 a, b = ostart, ostop
524 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
525
526
527 # list of stop condition (elapsed time, minimal run count)
528 DEFAULTLIMITS = (
529 (3.0, 100),
530 (10.0, 3),
531 )
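# Illustration (not part of the diffed file): with these defaults a benchmark
# keeps iterating until one limit is fully satisfied, i.e. it stops after 3.0s
# only once at least 100 runs have completed, otherwise it keeps going until
# 10.0s have elapsed and at least 3 runs have completed. A `perf.run-limits`
# value such as `5.0-50` would replace this list with a single (5.0, 50) entry.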
532
533
534 def _timer(
535 fm,
536 func,
537 setup=None,
538 title=None,
539 displayall=False,
540 limits=DEFAULTLIMITS,
541 prerun=0,
542 profiler=None,
543 ):
544 gc.collect()
545 results = []
546 begin = util.timer()
547 count = 0
548 if profiler is None:
549 profiler = NOOPCTX
550 for i in range(prerun):
551 if setup is not None:
552 setup()
553 func()
554 keepgoing = True
555 while keepgoing:
556 if setup is not None:
557 setup()
558 with profiler:
559 with timeone() as item:
560 r = func()
561 profiler = NOOPCTX
562 count += 1
563 results.append(item[0])
564 cstop = util.timer()
565 # Look for a stop condition.
566 elapsed = cstop - begin
567 for t, mincount in limits:
568 if elapsed >= t and count >= mincount:
569 keepgoing = False
570 break
571
572 formatone(fm, results, title=title, result=r, displayall=displayall)
573
574
575 def formatone(fm, timings, title=None, result=None, displayall=False):
576
577 count = len(timings)
578
579 fm.startitem()
580
581 if title:
582 fm.write(b'title', b'! %s\n', title)
583 if result:
584 fm.write(b'result', b'! result: %s\n', result)
585
586 def display(role, entry):
587 prefix = b''
588 if role != b'best':
589 prefix = b'%s.' % role
590 fm.plain(b'!')
591 fm.write(prefix + b'wall', b' wall %f', entry[0])
592 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
593 fm.write(prefix + b'user', b' user %f', entry[1])
594 fm.write(prefix + b'sys', b' sys %f', entry[2])
595 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
596 fm.plain(b'\n')
597
598 timings.sort()
599 min_val = timings[0]
600 display(b'best', min_val)
601 if displayall:
602 max_val = timings[-1]
603 display(b'max', max_val)
604 avg = tuple([sum(x) / count for x in zip(*timings)])
605 display(b'avg', avg)
606 median = timings[len(timings) // 2]
607 display(b'median', median)
608
609
610 # utilities for historical portability
611
612
613 def getint(ui, section, name, default):
614 # for "historical portability":
615 # ui.configint has been available since 1.9 (or fa2b596db182)
616 v = ui.config(section, name, None)
617 if v is None:
618 return default
619 try:
620 return int(v)
621 except ValueError:
622 raise error.ConfigError(
623 b"%s.%s is not an integer ('%s')" % (section, name, v)
624 )
625
626
627 def safeattrsetter(obj, name, ignoremissing=False):
628 """Ensure that 'obj' has 'name' attribute before subsequent setattr
629
630 This function is aborted, if 'obj' doesn't have 'name' attribute
631 at runtime. This avoids overlooking removal of an attribute, which
632 breaks assumption of performance measurement, in the future.
633
634 This function returns the object to (1) assign a new value, and
635 (2) restore an original value to the attribute.
636
637 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
638 abortion, and this function returns None. This is useful to
639 examine an attribute, which isn't ensured in all Mercurial
640 versions.
641 """
642 if not util.safehasattr(obj, name):
643 if ignoremissing:
644 return None
645 raise error.Abort(
646 (
647 b"missing attribute %s of %s might break assumption"
648 b" of performance measurement"
649 )
650 % (name, obj)
651 )
652
653 origvalue = getattr(obj, _sysstr(name))
654
655 class attrutil:
656 def set(self, newvalue):
657 setattr(obj, _sysstr(name), newvalue)
658
659 def restore(self):
660 setattr(obj, _sysstr(name), origvalue)
661
662 return attrutil()
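# Usage sketch (hypothetical, mirroring gettimer() above): the returned helper
# lets callers swap an attribute and later put it back, e.g.
#   fout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if fout:
#       fout.set(ui.ferr)   # redirect command output to stderr
#       ...
#       fout.restore()      # put the original stream back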
663
664
665 # utilities to examine each internal API changes
666
667
668 def getbranchmapsubsettable():
669 # for "historical portability":
670 # subsettable is defined in:
671 # - branchmap since 2.9 (or 175c6fd8cacc)
672 # - repoview since 2.5 (or 59a9f18d4587)
673 # - repoviewutil since 5.0
674 for mod in (branchmap, repoview, repoviewutil):
675 subsettable = getattr(mod, 'subsettable', None)
676 if subsettable:
677 return subsettable
678
679 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
680 # branchmap and repoview modules exist, but subsettable attribute
681 # doesn't)
682 raise error.Abort(
683 b"perfbranchmap not available with this Mercurial",
684 hint=b"use 2.5 or later",
685 )
686
687
688 def getsvfs(repo):
689 """Return appropriate object to access files under .hg/store"""
690 # for "historical portability":
691 # repo.svfs has been available since 2.3 (or 7034365089bf)
692 svfs = getattr(repo, 'svfs', None)
693 if svfs:
694 return svfs
695 else:
696 return getattr(repo, 'sopener')
697
698
699 def getvfs(repo):
700 """Return appropriate object to access files under .hg"""
701 # for "historical portability":
702 # repo.vfs has been available since 2.3 (or 7034365089bf)
703 vfs = getattr(repo, 'vfs', None)
704 if vfs:
705 return vfs
706 else:
707 return getattr(repo, 'opener')
708
709
710 def repocleartagscachefunc(repo):
711 """Return the function to clear tags cache according to repo internal API"""
712 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
713 # in this case, setattr(repo, '_tagscache', None) or so isn't
714 # correct way to clear tags cache, because existing code paths
715 # expect _tagscache to be a structured object.
716 def clearcache():
717 # _tagscache has been filteredpropertycache since 2.5 (or
718 # 98c867ac1330), and delattr() can't work in such case
719 if '_tagscache' in vars(repo):
720 del repo.__dict__['_tagscache']
721
722 return clearcache
723
724 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
725 if repotags: # since 1.4 (or 5614a628d173)
726 return lambda: repotags.set(None)
727
728 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
729 if repotagscache: # since 0.6 (or d7df759d0e97)
730 return lambda: repotagscache.set(None)
731
732 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
733 # this point, but it isn't so problematic, because:
734 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
735 # in perftags() causes failure soon
736 # - perf.py itself has been available since 1.1 (or eb240755386d)
737 raise error.Abort(b"tags API of this hg command is unknown")
738
739
740 # utilities to clear cache
741
742
743 def clearfilecache(obj, attrname):
744 unfiltered = getattr(obj, 'unfiltered', None)
745 if unfiltered is not None:
746 obj = obj.unfiltered()
747 if attrname in vars(obj):
748 delattr(obj, attrname)
749 obj._filecache.pop(attrname, None)
750
751
752 def clearchangelog(repo):
753 if repo is not repo.unfiltered():
754 object.__setattr__(repo, '_clcachekey', None)
755 object.__setattr__(repo, '_clcache', None)
756 clearfilecache(repo.unfiltered(), 'changelog')
757
758
759 # perf commands
760
761
762 @command(b'perf::walk|perfwalk', formatteropts)
763 def perfwalk(ui, repo, *pats, **opts):
764 opts = _byteskwargs(opts)
765 timer, fm = gettimer(ui, opts)
766 m = scmutil.match(repo[None], pats, {})
767 timer(
768 lambda: len(
769 list(
770 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
771 )
772 )
773 )
774 fm.end()
775
776
777 @command(b'perf::annotate|perfannotate', formatteropts)
778 def perfannotate(ui, repo, f, **opts):
779 opts = _byteskwargs(opts)
780 timer, fm = gettimer(ui, opts)
781 fc = repo[b'.'][f]
782 timer(lambda: len(fc.annotate(True)))
783 fm.end()
784
785
786 @command(
787 b'perf::status|perfstatus',
788 [
789 (b'u', b'unknown', False, b'ask status to look for unknown files'),
790 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
791 ]
792 + formatteropts,
793 )
794 def perfstatus(ui, repo, **opts):
795 """benchmark the performance of a single status call
796
797 The repository data are preserved between each call.
798
799 By default, only the status of the tracked file are requested. If
800 `--unknown` is passed, the "unknown" files are also tracked.
801 """
802 opts = _byteskwargs(opts)
803 # m = match.always(repo.root, repo.getcwd())
804 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
805 # False))))
806 timer, fm = gettimer(ui, opts)
807 if opts[b'dirstate']:
808 dirstate = repo.dirstate
809 m = scmutil.matchall(repo)
810 unknown = opts[b'unknown']
811
812 def status_dirstate():
813 s = dirstate.status(
814 m, subrepos=[], ignored=False, clean=False, unknown=unknown
815 )
816 sum(map(bool, s))
817
818 timer(status_dirstate)
819 else:
820 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
821 fm.end()
822
823
824 @command(b'perf::addremove|perfaddremove', formatteropts)
825 def perfaddremove(ui, repo, **opts):
826 opts = _byteskwargs(opts)
827 timer, fm = gettimer(ui, opts)
828 try:
829 oldquiet = repo.ui.quiet
830 repo.ui.quiet = True
831 matcher = scmutil.match(repo[None])
832 opts[b'dry_run'] = True
833 if 'uipathfn' in getargspec(scmutil.addremove).args:
834 uipathfn = scmutil.getuipathfn(repo)
835 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
836 else:
837 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
838 finally:
839 repo.ui.quiet = oldquiet
840 fm.end()
841
842
843 def clearcaches(cl):
844 # behave somewhat consistently across internal API changes
845 if util.safehasattr(cl, b'clearcaches'):
846 cl.clearcaches()
847 elif util.safehasattr(cl, b'_nodecache'):
848 # <= hg-5.2
849 from mercurial.node import nullid, nullrev
850
851 cl._nodecache = {nullid: nullrev}
852 cl._nodepos = None
853
854
855 @command(b'perf::heads|perfheads', formatteropts)
856 def perfheads(ui, repo, **opts):
857 """benchmark the computation of a changelog heads"""
858 opts = _byteskwargs(opts)
859 timer, fm = gettimer(ui, opts)
860 cl = repo.changelog
861
862 def s():
863 clearcaches(cl)
864
865 def d():
866 len(cl.headrevs())
867
868 timer(d, setup=s)
869 fm.end()
870
871
872 @command(
873 b'perf::tags|perftags',
874 formatteropts
875 + [
876 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
877 ],
878 )
879 def perftags(ui, repo, **opts):
880 opts = _byteskwargs(opts)
881 timer, fm = gettimer(ui, opts)
882 repocleartagscache = repocleartagscachefunc(repo)
883 clearrevlogs = opts[b'clear_revlogs']
884
885 def s():
886 if clearrevlogs:
887 clearchangelog(repo)
888 clearfilecache(repo.unfiltered(), 'manifest')
889 repocleartagscache()
890
891 def t():
892 return len(repo.tags())
893
894 timer(t, setup=s)
895 fm.end()
896
897
898 @command(b'perf::ancestors|perfancestors', formatteropts)
899 def perfancestors(ui, repo, **opts):
900 opts = _byteskwargs(opts)
901 timer, fm = gettimer(ui, opts)
902 heads = repo.changelog.headrevs()
903
904 def d():
905 for a in repo.changelog.ancestors(heads):
906 pass
907
908 timer(d)
909 fm.end()
910
911
912 @command(b'perf::ancestorset|perfancestorset', formatteropts)
913 def perfancestorset(ui, repo, revset, **opts):
914 opts = _byteskwargs(opts)
915 timer, fm = gettimer(ui, opts)
916 revs = repo.revs(revset)
917 heads = repo.changelog.headrevs()
918
919 def d():
920 s = repo.changelog.ancestors(heads)
921 for rev in revs:
922 rev in s
923
924 timer(d)
925 fm.end()
926
927
928 @command(
929 b'perf::delta-find',
930 revlogopts + formatteropts,
931 b'-c|-m|FILE REV',
932 )
933 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
934 """benchmark the process of finding a valid delta for a revlog revision
935
936 When a revlog receives a new revision (e.g. from a commit, or from an
937 incoming bundle), it searches for a suitable delta-base to produce a delta.
938 This perf command measures how much time we spend in this process. It
939 operates on an already stored revision.
940
941 See `hg help debug-delta-find` for another related command.
942 """
943 from mercurial import revlogutils
944 import mercurial.revlogutils.deltas as deltautil
945
946 opts = _byteskwargs(opts)
947 if arg_2 is None:
948 file_ = None
949 rev = arg_1
950 else:
951 file_ = arg_1
952 rev = arg_2
953
954 repo = repo.unfiltered()
955
956 timer, fm = gettimer(ui, opts)
957
958 rev = int(rev)
959
960 revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)
961
962 deltacomputer = deltautil.deltacomputer(revlog)
963
964 node = revlog.node(rev)
965 p1r, p2r = revlog.parentrevs(rev)
966 p1 = revlog.node(p1r)
967 p2 = revlog.node(p2r)
968 full_text = revlog.revision(rev)
969 textlen = len(full_text)
970 cachedelta = None
971 flags = revlog.flags(rev)
972
973 revinfo = revlogutils.revisioninfo(
974 node,
975 p1,
976 p2,
977 [full_text], # btext
978 textlen,
979 cachedelta,
980 flags,
981 )
982
983 # Note: we should probably purge the potential caches (like the full
984 # manifest cache) between runs.
985 def find_one():
986 with revlog._datafp() as fh:
987 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
988
989 timer(find_one)
990 fm.end()
991
992
993 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
994 def perfdiscovery(ui, repo, path, **opts):
995 """benchmark discovery between local repo and the peer at given path"""
996 repos = [repo, None]
997 timer, fm = gettimer(ui, opts)
998
999 try:
1000 from mercurial.utils.urlutil import get_unique_pull_path
1001
1002 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1003 except ImportError:
1004 path = ui.expandpath(path)
1005
1006 def s():
1007 repos[1] = hg.peer(ui, opts, path)
1008
1009 def d():
1010 setdiscovery.findcommonheads(ui, *repos)
1011
1012 timer(d, setup=s)
1013 fm.end()
1014
1015
1016 @command(
1017 b'perf::bookmarks|perfbookmarks',
1018 formatteropts
1019 + [
1020 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
1021 ],
1022 )
1023 def perfbookmarks(ui, repo, **opts):
1024 """benchmark parsing bookmarks from disk to memory"""
1025 opts = _byteskwargs(opts)
1026 timer, fm = gettimer(ui, opts)
1027
1028 clearrevlogs = opts[b'clear_revlogs']
1029
1030 def s():
1031 if clearrevlogs:
1032 clearchangelog(repo)
1033 clearfilecache(repo, b'_bookmarks')
1034
1035 def d():
1036 repo._bookmarks
1037
1038 timer(d, setup=s)
1039 fm.end()
1040
1041
1042 @command(
1043 b'perf::bundle',
1044 [
1045 (
1046 b'r',
1047 b'rev',
1048 [],
1049 b'changesets to bundle',
1050 b'REV',
1051 ),
1052 (
1053 b't',
1054 b'type',
1055 b'none',
1056 b'bundlespec to use (see `hg help bundlespec`)',
1057 b'TYPE',
1058 ),
1059 ]
1060 + formatteropts,
1061 b'REVS',
1062 )
1063 def perfbundle(ui, repo, *revs, **opts):
1064 """benchmark the creation of a bundle from a repository
1065
1066 For now, this only supports "none" compression.
1067 """
1068 try:
1068 from mercurial import bundlecaches
1069 from mercurial import bundlecaches
1070
1071 parsebundlespec = bundlecaches.parsebundlespec
1072 except ImportError:
1073 from mercurial import exchange
1074
1075 parsebundlespec = exchange.parsebundlespec
1076
1069 from mercurial import discovery
1077 from mercurial import discovery
1070 from mercurial import bundle2
1078 from mercurial import bundle2
1071
1079
1072 opts = _byteskwargs(opts)
1080 opts = _byteskwargs(opts)
1073 timer, fm = gettimer(ui, opts)
1081 timer, fm = gettimer(ui, opts)
1074
1082
1075 cl = repo.changelog
1083 cl = repo.changelog
1076 revs = list(revs)
1084 revs = list(revs)
1077 revs.extend(opts.get(b'rev', ()))
1085 revs.extend(opts.get(b'rev', ()))
1078 revs = scmutil.revrange(repo, revs)
1086 revs = scmutil.revrange(repo, revs)
1079 if not revs:
1087 if not revs:
1080 raise error.Abort(b"no revision specified")
1088 raise error.Abort(b"no revision specified")
1081 # make it a consistent set (ie: without topological gaps)
1089 # make it a consistent set (ie: without topological gaps)
1082 old_len = len(revs)
1090 old_len = len(revs)
1083 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1091 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1084 if old_len != len(revs):
1092 if old_len != len(revs):
1085 new_count = len(revs) - old_len
1093 new_count = len(revs) - old_len
1086 msg = b"add %d new revisions to make it a consistent set\n"
1094 msg = b"add %d new revisions to make it a consistent set\n"
1087 ui.write_err(msg % new_count)
1095 ui.write_err(msg % new_count)
1088
1096
1089 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1097 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1090 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1098 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1091 outgoing = discovery.outgoing(repo, bases, targets)
1099 outgoing = discovery.outgoing(repo, bases, targets)
1092
1100
1093 bundle_spec = opts.get(b'type')
1101 bundle_spec = opts.get(b'type')
1094
1102
1095 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1103 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1096
1104
1097 cgversion = bundle_spec.params.get(b"cg.version")
1105 cgversion = bundle_spec.params.get(b"cg.version")
1098 if cgversion is None:
1106 if cgversion is None:
1099 if bundle_spec.version == b'v1':
1107 if bundle_spec.version == b'v1':
1100 cgversion = b'01'
1108 cgversion = b'01'
1101 if bundle_spec.version == b'v2':
1109 if bundle_spec.version == b'v2':
1102 cgversion = b'02'
1110 cgversion = b'02'
1103 if cgversion not in changegroup.supportedoutgoingversions(repo):
1111 if cgversion not in changegroup.supportedoutgoingversions(repo):
1104 err = b"repository does not support bundle version %s"
1112 err = b"repository does not support bundle version %s"
1105 raise error.Abort(err % cgversion)
1113 raise error.Abort(err % cgversion)
1106
1114
1107 if cgversion == b'01': # bundle1
1115 if cgversion == b'01': # bundle1
1108 bversion = b'HG10' + bundle_spec.wirecompression
1116 bversion = b'HG10' + bundle_spec.wirecompression
1109 bcompression = None
1117 bcompression = None
1110 elif cgversion in (b'02', b'03'):
1118 elif cgversion in (b'02', b'03'):
1111 bversion = b'HG20'
1119 bversion = b'HG20'
1112 bcompression = bundle_spec.wirecompression
1120 bcompression = bundle_spec.wirecompression
1113 else:
1121 else:
1114 err = b'perf::bundle: unexpected changegroup version %s'
1122 err = b'perf::bundle: unexpected changegroup version %s'
1115 raise error.ProgrammingError(err % cgversion)
1123 raise error.ProgrammingError(err % cgversion)
1116
1124
1117 if bcompression is None:
1125 if bcompression is None:
1118 bcompression = b'UN'
1126 bcompression = b'UN'
1119
1127
1120 if bcompression != b'UN':
1128 if bcompression != b'UN':
1121 err = b'perf::bundle: compression currently unsupported: %s'
1129 err = b'perf::bundle: compression currently unsupported: %s'
1122 raise error.ProgrammingError(err % bcompression)
1130 raise error.ProgrammingError(err % bcompression)
1123
1131
1124 def do_bundle():
1132 def do_bundle():
1125 bundle2.writenewbundle(
1133 bundle2.writenewbundle(
1126 ui,
1134 ui,
1127 repo,
1135 repo,
1128 b'perf::bundle',
1136 b'perf::bundle',
1129 os.devnull,
1137 os.devnull,
1130 bversion,
1138 bversion,
1131 outgoing,
1139 outgoing,
1132 bundle_spec.params,
1140 bundle_spec.params,
1133 )
1141 )
1134
1142
1135 timer(do_bundle)
1143 timer(do_bundle)
1136 fm.end()
1144 fm.end()
1137
1145
1138
1146
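# perf::bundle widens the requested revisions with `%ld::%ld` so the set has
# no topological gaps: every revision lying between two selected revisions is
# pulled in as well. Below is a standalone sketch of that closure on a toy
# DAG; the `parents` mapping and the helper name are illustrative, not
# Mercurial APIs.
def _example_consistent_set(parents, revs):
    """Close `revs` under "descendant of a member and ancestor of a member",
    mirroring what the `%ld::%ld` revset computes above.

    `parents` maps each rev of a toy DAG to a tuple of its parent revs."""
    revs = set(revs)

    def ancestors(rev):
        seen, stack = set(), [rev]
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parents.get(r, ()))
        return seen  # includes `rev` itself

    anc = {r: ancestors(r) for r in parents}
    closed = set()
    for candidate in parents:
        below = revs & anc[candidate]  # selected revs that candidate descends from
        above = {r for r in revs if candidate in anc[r]}  # selected revs above it
        if below and above:
            closed.add(candidate)
    return closed


# On a linear history 0 <- 1 <- 2 <- 3, the gappy selection {0, 3} closes to
# {0, 1, 2, 3}:
#   _example_consistent_set({0: (), 1: (0,), 2: (1,), 3: (2,)}, {0, 3})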
1139 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1147 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1140 def perfbundleread(ui, repo, bundlepath, **opts):
1148 def perfbundleread(ui, repo, bundlepath, **opts):
1141 """Benchmark reading of bundle files.
1149 """Benchmark reading of bundle files.
1142
1150
1143 This command is meant to isolate the I/O part of bundle reading as
1151 This command is meant to isolate the I/O part of bundle reading as
1144 much as possible.
1152 much as possible.
1145 """
1153 """
1146 from mercurial import (
1154 from mercurial import (
1147 bundle2,
1155 bundle2,
1148 exchange,
1156 exchange,
1149 streamclone,
1157 streamclone,
1150 )
1158 )
1151
1159
1152 opts = _byteskwargs(opts)
1160 opts = _byteskwargs(opts)
1153
1161
1154 def makebench(fn):
1162 def makebench(fn):
1155 def run():
1163 def run():
1156 with open(bundlepath, b'rb') as fh:
1164 with open(bundlepath, b'rb') as fh:
1157 bundle = exchange.readbundle(ui, fh, bundlepath)
1165 bundle = exchange.readbundle(ui, fh, bundlepath)
1158 fn(bundle)
1166 fn(bundle)
1159
1167
1160 return run
1168 return run
1161
1169
1162 def makereadnbytes(size):
1170 def makereadnbytes(size):
1163 def run():
1171 def run():
1164 with open(bundlepath, b'rb') as fh:
1172 with open(bundlepath, b'rb') as fh:
1165 bundle = exchange.readbundle(ui, fh, bundlepath)
1173 bundle = exchange.readbundle(ui, fh, bundlepath)
1166 while bundle.read(size):
1174 while bundle.read(size):
1167 pass
1175 pass
1168
1176
1169 return run
1177 return run
1170
1178
1171 def makestdioread(size):
1179 def makestdioread(size):
1172 def run():
1180 def run():
1173 with open(bundlepath, b'rb') as fh:
1181 with open(bundlepath, b'rb') as fh:
1174 while fh.read(size):
1182 while fh.read(size):
1175 pass
1183 pass
1176
1184
1177 return run
1185 return run
1178
1186
1179 # bundle1
1187 # bundle1
1180
1188
1181 def deltaiter(bundle):
1189 def deltaiter(bundle):
1182 for delta in bundle.deltaiter():
1190 for delta in bundle.deltaiter():
1183 pass
1191 pass
1184
1192
1185 def iterchunks(bundle):
1193 def iterchunks(bundle):
1186 for chunk in bundle.getchunks():
1194 for chunk in bundle.getchunks():
1187 pass
1195 pass
1188
1196
1189 # bundle2
1197 # bundle2
1190
1198
1191 def forwardchunks(bundle):
1199 def forwardchunks(bundle):
1192 for chunk in bundle._forwardchunks():
1200 for chunk in bundle._forwardchunks():
1193 pass
1201 pass
1194
1202
1195 def iterparts(bundle):
1203 def iterparts(bundle):
1196 for part in bundle.iterparts():
1204 for part in bundle.iterparts():
1197 pass
1205 pass
1198
1206
1199 def iterpartsseekable(bundle):
1207 def iterpartsseekable(bundle):
1200 for part in bundle.iterparts(seekable=True):
1208 for part in bundle.iterparts(seekable=True):
1201 pass
1209 pass
1202
1210
1203 def seek(bundle):
1211 def seek(bundle):
1204 for part in bundle.iterparts(seekable=True):
1212 for part in bundle.iterparts(seekable=True):
1205 part.seek(0, os.SEEK_END)
1213 part.seek(0, os.SEEK_END)
1206
1214
1207 def makepartreadnbytes(size):
1215 def makepartreadnbytes(size):
1208 def run():
1216 def run():
1209 with open(bundlepath, b'rb') as fh:
1217 with open(bundlepath, b'rb') as fh:
1210 bundle = exchange.readbundle(ui, fh, bundlepath)
1218 bundle = exchange.readbundle(ui, fh, bundlepath)
1211 for part in bundle.iterparts():
1219 for part in bundle.iterparts():
1212 while part.read(size):
1220 while part.read(size):
1213 pass
1221 pass
1214
1222
1215 return run
1223 return run
1216
1224
1217 benches = [
1225 benches = [
1218 (makestdioread(8192), b'read(8k)'),
1226 (makestdioread(8192), b'read(8k)'),
1219 (makestdioread(16384), b'read(16k)'),
1227 (makestdioread(16384), b'read(16k)'),
1220 (makestdioread(32768), b'read(32k)'),
1228 (makestdioread(32768), b'read(32k)'),
1221 (makestdioread(131072), b'read(128k)'),
1229 (makestdioread(131072), b'read(128k)'),
1222 ]
1230 ]
1223
1231
1224 with open(bundlepath, b'rb') as fh:
1232 with open(bundlepath, b'rb') as fh:
1225 bundle = exchange.readbundle(ui, fh, bundlepath)
1233 bundle = exchange.readbundle(ui, fh, bundlepath)
1226
1234
1227 if isinstance(bundle, changegroup.cg1unpacker):
1235 if isinstance(bundle, changegroup.cg1unpacker):
1228 benches.extend(
1236 benches.extend(
1229 [
1237 [
1230 (makebench(deltaiter), b'cg1 deltaiter()'),
1238 (makebench(deltaiter), b'cg1 deltaiter()'),
1231 (makebench(iterchunks), b'cg1 getchunks()'),
1239 (makebench(iterchunks), b'cg1 getchunks()'),
1232 (makereadnbytes(8192), b'cg1 read(8k)'),
1240 (makereadnbytes(8192), b'cg1 read(8k)'),
1233 (makereadnbytes(16384), b'cg1 read(16k)'),
1241 (makereadnbytes(16384), b'cg1 read(16k)'),
1234 (makereadnbytes(32768), b'cg1 read(32k)'),
1242 (makereadnbytes(32768), b'cg1 read(32k)'),
1235 (makereadnbytes(131072), b'cg1 read(128k)'),
1243 (makereadnbytes(131072), b'cg1 read(128k)'),
1236 ]
1244 ]
1237 )
1245 )
1238 elif isinstance(bundle, bundle2.unbundle20):
1246 elif isinstance(bundle, bundle2.unbundle20):
1239 benches.extend(
1247 benches.extend(
1240 [
1248 [
1241 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1249 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1242 (makebench(iterparts), b'bundle2 iterparts()'),
1250 (makebench(iterparts), b'bundle2 iterparts()'),
1243 (
1251 (
1244 makebench(iterpartsseekable),
1252 makebench(iterpartsseekable),
1245 b'bundle2 iterparts() seekable',
1253 b'bundle2 iterparts() seekable',
1246 ),
1254 ),
1247 (makebench(seek), b'bundle2 part seek()'),
1255 (makebench(seek), b'bundle2 part seek()'),
1248 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1256 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1249 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1257 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1250 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1258 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1251 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1259 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1252 ]
1260 ]
1253 )
1261 )
1254 elif isinstance(bundle, streamclone.streamcloneapplier):
1262 elif isinstance(bundle, streamclone.streamcloneapplier):
1255 raise error.Abort(b'stream clone bundles not supported')
1263 raise error.Abort(b'stream clone bundles not supported')
1256 else:
1264 else:
1257 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1265 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1258
1266
1259 for fn, title in benches:
1267 for fn, title in benches:
1260 timer, fm = gettimer(ui, opts)
1268 timer, fm = gettimer(ui, opts)
1261 timer(fn, title=title)
1269 timer(fn, title=title)
1262 fm.end()
1270 fm.end()
1263
1271
1264
1272
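# perfbundleread builds its benchmark list from closure factories: each
# `make*` helper captures a chunk size (or an unbundler method) and returns a
# zero-argument callable for the timer. Below is a standalone sketch of that
# factory pattern over an ordinary file; the path and helper name are
# illustrative only.
def _example_make_chunk_reader(path, size):
    """Return a callable reading `path` in `size`-byte chunks, mirroring
    makestdioread()/makereadnbytes() above."""

    def run():
        with open(path, 'rb') as fh:
            while fh.read(size):
                pass

    return run


# Example benchmark table, analogous to the `benches` list above:
#   benches = [
#       (_example_make_chunk_reader('some-bundle.hg', size),
#        'read(%dk)' % (size // 1024))
#       for size in (8192, 32768, 131072)
#   ]
#   for fn, title in benches:
#       fn()  # each callable would be handed to timer(fn, title=title)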
1265 @command(
1273 @command(
1266 b'perf::changegroupchangelog|perfchangegroupchangelog',
1274 b'perf::changegroupchangelog|perfchangegroupchangelog',
1267 formatteropts
1275 formatteropts
1268 + [
1276 + [
1269 (b'', b'cgversion', b'02', b'changegroup version'),
1277 (b'', b'cgversion', b'02', b'changegroup version'),
1270 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1278 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1271 ],
1279 ],
1272 )
1280 )
1273 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1281 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1274 """Benchmark producing a changelog group for a changegroup.
1282 """Benchmark producing a changelog group for a changegroup.
1275
1283
1276 This measures the time spent processing the changelog during a
1284 This measures the time spent processing the changelog during a
1277 bundle operation. This occurs during `hg bundle` and on a server
1285 bundle operation. This occurs during `hg bundle` and on a server
1278 processing a `getbundle` wire protocol request (handles clones
1286 processing a `getbundle` wire protocol request (handles clones
1279 and pull requests).
1287 and pull requests).
1280
1288
1281 By default, all revisions are added to the changegroup.
1289 By default, all revisions are added to the changegroup.
1282 """
1290 """
1283 opts = _byteskwargs(opts)
1291 opts = _byteskwargs(opts)
1284 cl = repo.changelog
1292 cl = repo.changelog
1285 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1293 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1286 bundler = changegroup.getbundler(cgversion, repo)
1294 bundler = changegroup.getbundler(cgversion, repo)
1287
1295
1288 def d():
1296 def d():
1289 state, chunks = bundler._generatechangelog(cl, nodes)
1297 state, chunks = bundler._generatechangelog(cl, nodes)
1290 for chunk in chunks:
1298 for chunk in chunks:
1291 pass
1299 pass
1292
1300
1293 timer, fm = gettimer(ui, opts)
1301 timer, fm = gettimer(ui, opts)
1294
1302
1295 # Terminal printing can interfere with timing. So disable it.
1303 # Terminal printing can interfere with timing. So disable it.
1296 with ui.configoverride({(b'progress', b'disable'): True}):
1304 with ui.configoverride({(b'progress', b'disable'): True}):
1297 timer(d)
1305 timer(d)
1298
1306
1299 fm.end()
1307 fm.end()
1300
1308
1301
1309
1302 @command(b'perf::dirs|perfdirs', formatteropts)
1310 @command(b'perf::dirs|perfdirs', formatteropts)
1303 def perfdirs(ui, repo, **opts):
1311 def perfdirs(ui, repo, **opts):
1304 opts = _byteskwargs(opts)
1312 opts = _byteskwargs(opts)
1305 timer, fm = gettimer(ui, opts)
1313 timer, fm = gettimer(ui, opts)
1306 dirstate = repo.dirstate
1314 dirstate = repo.dirstate
1307 b'a' in dirstate
1315 b'a' in dirstate
1308
1316
1309 def d():
1317 def d():
1310 dirstate.hasdir(b'a')
1318 dirstate.hasdir(b'a')
1311 try:
1319 try:
1312 del dirstate._map._dirs
1320 del dirstate._map._dirs
1313 except AttributeError:
1321 except AttributeError:
1314 pass
1322 pass
1315
1323
1316 timer(d)
1324 timer(d)
1317 fm.end()
1325 fm.end()
1318
1326
1319
1327
1320 @command(
1328 @command(
1321 b'perf::dirstate|perfdirstate',
1329 b'perf::dirstate|perfdirstate',
1322 [
1330 [
1323 (
1331 (
1324 b'',
1332 b'',
1325 b'iteration',
1333 b'iteration',
1326 None,
1334 None,
1327 b'benchmark a full iteration for the dirstate',
1335 b'benchmark a full iteration for the dirstate',
1328 ),
1336 ),
1329 (
1337 (
1330 b'',
1338 b'',
1331 b'contains',
1339 b'contains',
1332 None,
1340 None,
1333 b'benchmark a large number of `nf in dirstate` calls',
1341 b'benchmark a large number of `nf in dirstate` calls',
1334 ),
1342 ),
1335 ]
1343 ]
1336 + formatteropts,
1344 + formatteropts,
1337 )
1345 )
1338 def perfdirstate(ui, repo, **opts):
1346 def perfdirstate(ui, repo, **opts):
1339 """benchmark the time of various dirstate operations
1347 """benchmark the time of various dirstate operations
1340
1348
1341 By default benchmark the time necessary to load a dirstate from scratch.
1349 By default benchmark the time necessary to load a dirstate from scratch.
1342 The dirstate is loaded to the point where a "contains" request can be
1350 The dirstate is loaded to the point where a "contains" request can be
1343 answered.
1351 answered.
1344 """
1352 """
1345 opts = _byteskwargs(opts)
1353 opts = _byteskwargs(opts)
1346 timer, fm = gettimer(ui, opts)
1354 timer, fm = gettimer(ui, opts)
1347 b"a" in repo.dirstate
1355 b"a" in repo.dirstate
1348
1356
1349 if opts[b'iteration'] and opts[b'contains']:
1357 if opts[b'iteration'] and opts[b'contains']:
1350 msg = b'only specify one of --iteration or --contains'
1358 msg = b'only specify one of --iteration or --contains'
1351 raise error.Abort(msg)
1359 raise error.Abort(msg)
1352
1360
1353 if opts[b'iteration']:
1361 if opts[b'iteration']:
1354 setup = None
1362 setup = None
1355 dirstate = repo.dirstate
1363 dirstate = repo.dirstate
1356
1364
1357 def d():
1365 def d():
1358 for f in dirstate:
1366 for f in dirstate:
1359 pass
1367 pass
1360
1368
1361 elif opts[b'contains']:
1369 elif opts[b'contains']:
1362 setup = None
1370 setup = None
1363 dirstate = repo.dirstate
1371 dirstate = repo.dirstate
1364 allfiles = list(dirstate)
1372 allfiles = list(dirstate)
1365 # also add file path that will be "missing" from the dirstate
1373 # also add file path that will be "missing" from the dirstate
1366 allfiles.extend([f[::-1] for f in allfiles])
1374 allfiles.extend([f[::-1] for f in allfiles])
1367
1375
1368 def d():
1376 def d():
1369 for f in allfiles:
1377 for f in allfiles:
1370 f in dirstate
1378 f in dirstate
1371
1379
1372 else:
1380 else:
1373
1381
1374 def setup():
1382 def setup():
1375 repo.dirstate.invalidate()
1383 repo.dirstate.invalidate()
1376
1384
1377 def d():
1385 def d():
1378 b"a" in repo.dirstate
1386 b"a" in repo.dirstate
1379
1387
1380 timer(d, setup=setup)
1388 timer(d, setup=setup)
1381 fm.end()
1389 fm.end()
1382
1390
1383
1391
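# For `--contains`, the command above probes every tracked path plus a
# reversed copy of each path, so roughly half of the membership tests miss.
# Below is a standalone sketch of that workload against a plain dict standing
# in for the dirstate; all names here are illustrative.
def _example_contains_workload(tracked):
    """Build and run a present/absent lookup mix, as perfdirstate --contains
    does with `f[::-1]`."""
    fake_dirstate = {path: None for path in tracked}
    workload = list(tracked)
    workload.extend(path[::-1] for path in tracked)  # almost certainly missing

    hits = sum(1 for path in workload if path in fake_dirstate)
    return hits, len(workload) - hits


# _example_contains_workload([b'a/b.txt', b'a/c.txt']) returns (2, 2)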
1384 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1392 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1385 def perfdirstatedirs(ui, repo, **opts):
1393 def perfdirstatedirs(ui, repo, **opts):
1386 """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
1394 """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
1387 opts = _byteskwargs(opts)
1395 opts = _byteskwargs(opts)
1388 timer, fm = gettimer(ui, opts)
1396 timer, fm = gettimer(ui, opts)
1389 repo.dirstate.hasdir(b"a")
1397 repo.dirstate.hasdir(b"a")
1390
1398
1391 def setup():
1399 def setup():
1392 try:
1400 try:
1393 del repo.dirstate._map._dirs
1401 del repo.dirstate._map._dirs
1394 except AttributeError:
1402 except AttributeError:
1395 pass
1403 pass
1396
1404
1397 def d():
1405 def d():
1398 repo.dirstate.hasdir(b"a")
1406 repo.dirstate.hasdir(b"a")
1399
1407
1400 timer(d, setup=setup)
1408 timer(d, setup=setup)
1401 fm.end()
1409 fm.end()
1402
1410
1403
1411
1404 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1412 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1405 def perfdirstatefoldmap(ui, repo, **opts):
1413 def perfdirstatefoldmap(ui, repo, **opts):
1406 """benchmark a `dirstate._map.filefoldmap.get()` request
1414 """benchmark a `dirstate._map.filefoldmap.get()` request
1407
1415
1408 The dirstate filefoldmap cache is dropped between every request.
1416 The dirstate filefoldmap cache is dropped between every request.
1409 """
1417 """
1410 opts = _byteskwargs(opts)
1418 opts = _byteskwargs(opts)
1411 timer, fm = gettimer(ui, opts)
1419 timer, fm = gettimer(ui, opts)
1412 dirstate = repo.dirstate
1420 dirstate = repo.dirstate
1413 dirstate._map.filefoldmap.get(b'a')
1421 dirstate._map.filefoldmap.get(b'a')
1414
1422
1415 def setup():
1423 def setup():
1416 del dirstate._map.filefoldmap
1424 del dirstate._map.filefoldmap
1417
1425
1418 def d():
1426 def d():
1419 dirstate._map.filefoldmap.get(b'a')
1427 dirstate._map.filefoldmap.get(b'a')
1420
1428
1421 timer(d, setup=setup)
1429 timer(d, setup=setup)
1422 fm.end()
1430 fm.end()
1423
1431
1424
1432
1425 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1433 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1426 def perfdirfoldmap(ui, repo, **opts):
1434 def perfdirfoldmap(ui, repo, **opts):
1427 """benchmark a `dirstate._map.dirfoldmap.get()` request
1435 """benchmark a `dirstate._map.dirfoldmap.get()` request
1428
1436
1429 The dirstate dirfoldmap cache is dropped between every request.
1437 The dirstate dirfoldmap cache is dropped between every request.
1430 """
1438 """
1431 opts = _byteskwargs(opts)
1439 opts = _byteskwargs(opts)
1432 timer, fm = gettimer(ui, opts)
1440 timer, fm = gettimer(ui, opts)
1433 dirstate = repo.dirstate
1441 dirstate = repo.dirstate
1434 dirstate._map.dirfoldmap.get(b'a')
1442 dirstate._map.dirfoldmap.get(b'a')
1435
1443
1436 def setup():
1444 def setup():
1437 del dirstate._map.dirfoldmap
1445 del dirstate._map.dirfoldmap
1438 try:
1446 try:
1439 del dirstate._map._dirs
1447 del dirstate._map._dirs
1440 except AttributeError:
1448 except AttributeError:
1441 pass
1449 pass
1442
1450
1443 def d():
1451 def d():
1444 dirstate._map.dirfoldmap.get(b'a')
1452 dirstate._map.dirfoldmap.get(b'a')
1445
1453
1446 timer(d, setup=setup)
1454 timer(d, setup=setup)
1447 fm.end()
1455 fm.end()
1448
1456
1449
1457
1450 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1458 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1451 def perfdirstatewrite(ui, repo, **opts):
1459 def perfdirstatewrite(ui, repo, **opts):
1452 """benchmark the time it takes to write a dirstate on disk"""
1460 """benchmark the time it takes to write a dirstate on disk"""
1453 opts = _byteskwargs(opts)
1461 opts = _byteskwargs(opts)
1454 timer, fm = gettimer(ui, opts)
1462 timer, fm = gettimer(ui, opts)
1455 ds = repo.dirstate
1463 ds = repo.dirstate
1456 b"a" in ds
1464 b"a" in ds
1457
1465
1458 def setup():
1466 def setup():
1459 ds._dirty = True
1467 ds._dirty = True
1460
1468
1461 def d():
1469 def d():
1462 ds.write(repo.currenttransaction())
1470 ds.write(repo.currenttransaction())
1463
1471
1464 timer(d, setup=setup)
1472 timer(d, setup=setup)
1465 fm.end()
1473 fm.end()
1466
1474
1467
1475
1468 def _getmergerevs(repo, opts):
1476 def _getmergerevs(repo, opts):
1469 """parse command arguments to return the revs involved in a merge
1477 """parse command arguments to return the revs involved in a merge
1470
1478
1471 input: options dictionary with `rev`, `from` and `base`
1479 input: options dictionary with `rev`, `from` and `base`
1472 output: (localctx, otherctx, basectx)
1480 output: (localctx, otherctx, basectx)
1473 """
1481 """
1474 if opts[b'from']:
1482 if opts[b'from']:
1475 fromrev = scmutil.revsingle(repo, opts[b'from'])
1483 fromrev = scmutil.revsingle(repo, opts[b'from'])
1476 wctx = repo[fromrev]
1484 wctx = repo[fromrev]
1477 else:
1485 else:
1478 wctx = repo[None]
1486 wctx = repo[None]
1479 # we don't want working dir files to be stat'd in the benchmark, so
1487 # we don't want working dir files to be stat'd in the benchmark, so
1480 # prime that cache
1488 # prime that cache
1481 wctx.dirty()
1489 wctx.dirty()
1482 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1490 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1483 if opts[b'base']:
1491 if opts[b'base']:
1484 fromrev = scmutil.revsingle(repo, opts[b'base'])
1492 fromrev = scmutil.revsingle(repo, opts[b'base'])
1485 ancestor = repo[fromrev]
1493 ancestor = repo[fromrev]
1486 else:
1494 else:
1487 ancestor = wctx.ancestor(rctx)
1495 ancestor = wctx.ancestor(rctx)
1488 return (wctx, rctx, ancestor)
1496 return (wctx, rctx, ancestor)
1489
1497
1490
1498
1491 @command(
1499 @command(
1492 b'perf::mergecalculate|perfmergecalculate',
1500 b'perf::mergecalculate|perfmergecalculate',
1493 [
1501 [
1494 (b'r', b'rev', b'.', b'rev to merge against'),
1502 (b'r', b'rev', b'.', b'rev to merge against'),
1495 (b'', b'from', b'', b'rev to merge from'),
1503 (b'', b'from', b'', b'rev to merge from'),
1496 (b'', b'base', b'', b'the revision to use as base'),
1504 (b'', b'base', b'', b'the revision to use as base'),
1497 ]
1505 ]
1498 + formatteropts,
1506 + formatteropts,
1499 )
1507 )
1500 def perfmergecalculate(ui, repo, **opts):
1508 def perfmergecalculate(ui, repo, **opts):
1501 opts = _byteskwargs(opts)
1509 opts = _byteskwargs(opts)
1502 timer, fm = gettimer(ui, opts)
1510 timer, fm = gettimer(ui, opts)
1503
1511
1504 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1512 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1505
1513
1506 def d():
1514 def d():
1507 # acceptremote is True because we don't want prompts in the middle of
1515 # acceptremote is True because we don't want prompts in the middle of
1508 # our benchmark
1516 # our benchmark
1509 merge.calculateupdates(
1517 merge.calculateupdates(
1510 repo,
1518 repo,
1511 wctx,
1519 wctx,
1512 rctx,
1520 rctx,
1513 [ancestor],
1521 [ancestor],
1514 branchmerge=False,
1522 branchmerge=False,
1515 force=False,
1523 force=False,
1516 acceptremote=True,
1524 acceptremote=True,
1517 followcopies=True,
1525 followcopies=True,
1518 )
1526 )
1519
1527
1520 timer(d)
1528 timer(d)
1521 fm.end()
1529 fm.end()
1522
1530
1523
1531
1524 @command(
1532 @command(
1525 b'perf::mergecopies|perfmergecopies',
1533 b'perf::mergecopies|perfmergecopies',
1526 [
1534 [
1527 (b'r', b'rev', b'.', b'rev to merge against'),
1535 (b'r', b'rev', b'.', b'rev to merge against'),
1528 (b'', b'from', b'', b'rev to merge from'),
1536 (b'', b'from', b'', b'rev to merge from'),
1529 (b'', b'base', b'', b'the revision to use as base'),
1537 (b'', b'base', b'', b'the revision to use as base'),
1530 ]
1538 ]
1531 + formatteropts,
1539 + formatteropts,
1532 )
1540 )
1533 def perfmergecopies(ui, repo, **opts):
1541 def perfmergecopies(ui, repo, **opts):
1534 """measure runtime of `copies.mergecopies`"""
1542 """measure runtime of `copies.mergecopies`"""
1535 opts = _byteskwargs(opts)
1543 opts = _byteskwargs(opts)
1536 timer, fm = gettimer(ui, opts)
1544 timer, fm = gettimer(ui, opts)
1537 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1545 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1538
1546
1539 def d():
1547 def d():
1540 # acceptremote is True because we don't want prompts in the middle of
1548 # acceptremote is True because we don't want prompts in the middle of
1541 # our benchmark
1549 # our benchmark
1542 copies.mergecopies(repo, wctx, rctx, ancestor)
1550 copies.mergecopies(repo, wctx, rctx, ancestor)
1543
1551
1544 timer(d)
1552 timer(d)
1545 fm.end()
1553 fm.end()
1546
1554
1547
1555
1548 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1556 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1549 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1557 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1550 """benchmark the copy tracing logic"""
1558 """benchmark the copy tracing logic"""
1551 opts = _byteskwargs(opts)
1559 opts = _byteskwargs(opts)
1552 timer, fm = gettimer(ui, opts)
1560 timer, fm = gettimer(ui, opts)
1553 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1561 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1554 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1562 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1555
1563
1556 def d():
1564 def d():
1557 copies.pathcopies(ctx1, ctx2)
1565 copies.pathcopies(ctx1, ctx2)
1558
1566
1559 timer(d)
1567 timer(d)
1560 fm.end()
1568 fm.end()
1561
1569
1562
1570
1563 @command(
1571 @command(
1564 b'perf::phases|perfphases',
1572 b'perf::phases|perfphases',
1565 [
1573 [
1566 (b'', b'full', False, b'include file reading time too'),
1574 (b'', b'full', False, b'include file reading time too'),
1567 ],
1575 ],
1568 b"",
1576 b"",
1569 )
1577 )
1570 def perfphases(ui, repo, **opts):
1578 def perfphases(ui, repo, **opts):
1571 """benchmark phasesets computation"""
1579 """benchmark phasesets computation"""
1572 opts = _byteskwargs(opts)
1580 opts = _byteskwargs(opts)
1573 timer, fm = gettimer(ui, opts)
1581 timer, fm = gettimer(ui, opts)
1574 _phases = repo._phasecache
1582 _phases = repo._phasecache
1575 full = opts.get(b'full')
1583 full = opts.get(b'full')
1576
1584
1577 def d():
1585 def d():
1578 phases = _phases
1586 phases = _phases
1579 if full:
1587 if full:
1580 clearfilecache(repo, b'_phasecache')
1588 clearfilecache(repo, b'_phasecache')
1581 phases = repo._phasecache
1589 phases = repo._phasecache
1582 phases.invalidate()
1590 phases.invalidate()
1583 phases.loadphaserevs(repo)
1591 phases.loadphaserevs(repo)
1584
1592
1585 timer(d)
1593 timer(d)
1586 fm.end()
1594 fm.end()
1587
1595
1588
1596
1589 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1597 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1590 def perfphasesremote(ui, repo, dest=None, **opts):
1598 def perfphasesremote(ui, repo, dest=None, **opts):
1591 """benchmark time needed to analyse phases of the remote server"""
1599 """benchmark time needed to analyse phases of the remote server"""
1592 from mercurial.node import bin
1600 from mercurial.node import bin
1593 from mercurial import (
1601 from mercurial import (
1594 exchange,
1602 exchange,
1595 hg,
1603 hg,
1596 phases,
1604 phases,
1597 )
1605 )
1598
1606
1599 opts = _byteskwargs(opts)
1607 opts = _byteskwargs(opts)
1600 timer, fm = gettimer(ui, opts)
1608 timer, fm = gettimer(ui, opts)
1601
1609
1602 path = ui.getpath(dest, default=(b'default-push', b'default'))
1610 path = ui.getpath(dest, default=(b'default-push', b'default'))
1603 if not path:
1611 if not path:
1604 raise error.Abort(
1612 raise error.Abort(
1605 b'default repository not configured!',
1613 b'default repository not configured!',
1606 hint=b"see 'hg help config.paths'",
1614 hint=b"see 'hg help config.paths'",
1607 )
1615 )
1608 dest = path.pushloc or path.loc
1616 dest = path.pushloc or path.loc
1609 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1617 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1610 other = hg.peer(repo, opts, dest)
1618 other = hg.peer(repo, opts, dest)
1611
1619
1612 # easier to perform discovery through the operation
1620 # easier to perform discovery through the operation
1613 op = exchange.pushoperation(repo, other)
1621 op = exchange.pushoperation(repo, other)
1614 exchange._pushdiscoverychangeset(op)
1622 exchange._pushdiscoverychangeset(op)
1615
1623
1616 remotesubset = op.fallbackheads
1624 remotesubset = op.fallbackheads
1617
1625
1618 with other.commandexecutor() as e:
1626 with other.commandexecutor() as e:
1619 remotephases = e.callcommand(
1627 remotephases = e.callcommand(
1620 b'listkeys', {b'namespace': b'phases'}
1628 b'listkeys', {b'namespace': b'phases'}
1621 ).result()
1629 ).result()
1622 del other
1630 del other
1623 publishing = remotephases.get(b'publishing', False)
1631 publishing = remotephases.get(b'publishing', False)
1624 if publishing:
1632 if publishing:
1625 ui.statusnoi18n(b'publishing: yes\n')
1633 ui.statusnoi18n(b'publishing: yes\n')
1626 else:
1634 else:
1627 ui.statusnoi18n(b'publishing: no\n')
1635 ui.statusnoi18n(b'publishing: no\n')
1628
1636
1629 has_node = getattr(repo.changelog.index, 'has_node', None)
1637 has_node = getattr(repo.changelog.index, 'has_node', None)
1630 if has_node is None:
1638 if has_node is None:
1631 has_node = repo.changelog.nodemap.__contains__
1639 has_node = repo.changelog.nodemap.__contains__
1632 nonpublishroots = 0
1640 nonpublishroots = 0
1633 for nhex, phase in remotephases.iteritems():
1641 for nhex, phase in remotephases.iteritems():
1634 if nhex == b'publishing': # ignore data related to publish option
1642 if nhex == b'publishing': # ignore data related to publish option
1635 continue
1643 continue
1636 node = bin(nhex)
1644 node = bin(nhex)
1637 if has_node(node) and int(phase):
1645 if has_node(node) and int(phase):
1638 nonpublishroots += 1
1646 nonpublishroots += 1
1639 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1647 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1640 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1648 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1641
1649
1642 def d():
1650 def d():
1643 phases.remotephasessummary(repo, remotesubset, remotephases)
1651 phases.remotephasessummary(repo, remotesubset, remotephases)
1644
1652
1645 timer(d)
1653 timer(d)
1646 fm.end()
1654 fm.end()
1647
1655
1648
1656
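# The loop above classifies the `phases` listkeys namespace: every key except
# the special `publishing` entry is a hex root node, and a non-zero value
# marks a non-public root. Below is a standalone sketch of the same counting,
# with a plain set of hex nodes standing in for `has_node`; the sample data
# is made up.
def _example_count_nonpublic_roots(remotephases, known_hexnodes):
    """Count roots that are known locally and non-public, mirroring the
    `nonpublishroots` loop above."""
    nonpublic = 0
    for nhex, phase in remotephases.items():
        if nhex == b'publishing':  # option flag, not a root
            continue
        if nhex in known_hexnodes and int(phase):
            nonpublic += 1
    return nonpublic


# _example_count_nonpublic_roots(
#     {b'publishing': b'True', b'aa' * 20: b'1', b'bb' * 20: b'0'},
#     {b'aa' * 20},
# ) returns 1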
1649 @command(
1657 @command(
1650 b'perf::manifest|perfmanifest',
1658 b'perf::manifest|perfmanifest',
1651 [
1659 [
1652 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1660 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1653 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1661 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1654 ]
1662 ]
1655 + formatteropts,
1663 + formatteropts,
1656 b'REV|NODE',
1664 b'REV|NODE',
1657 )
1665 )
1658 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1666 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1659 """benchmark the time to read a manifest from disk and return a usable
1667 """benchmark the time to read a manifest from disk and return a usable
1660 dict-like object
1668 dict-like object
1661
1669
1662 Manifest caches are cleared before retrieval."""
1670 Manifest caches are cleared before retrieval."""
1663 opts = _byteskwargs(opts)
1671 opts = _byteskwargs(opts)
1664 timer, fm = gettimer(ui, opts)
1672 timer, fm = gettimer(ui, opts)
1665 if not manifest_rev:
1673 if not manifest_rev:
1666 ctx = scmutil.revsingle(repo, rev, rev)
1674 ctx = scmutil.revsingle(repo, rev, rev)
1667 t = ctx.manifestnode()
1675 t = ctx.manifestnode()
1668 else:
1676 else:
1669 from mercurial.node import bin
1677 from mercurial.node import bin
1670
1678
1671 if len(rev) == 40:
1679 if len(rev) == 40:
1672 t = bin(rev)
1680 t = bin(rev)
1673 else:
1681 else:
1674 try:
1682 try:
1675 rev = int(rev)
1683 rev = int(rev)
1676
1684
1677 if util.safehasattr(repo.manifestlog, b'getstorage'):
1685 if util.safehasattr(repo.manifestlog, b'getstorage'):
1678 t = repo.manifestlog.getstorage(b'').node(rev)
1686 t = repo.manifestlog.getstorage(b'').node(rev)
1679 else:
1687 else:
1680 t = repo.manifestlog._revlog.lookup(rev)
1688 t = repo.manifestlog._revlog.lookup(rev)
1681 except ValueError:
1689 except ValueError:
1682 raise error.Abort(
1690 raise error.Abort(
1683 b'manifest revision must be integer or full node'
1691 b'manifest revision must be integer or full node'
1684 )
1692 )
1685
1693
1686 def d():
1694 def d():
1687 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1695 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1688 repo.manifestlog[t].read()
1696 repo.manifestlog[t].read()
1689
1697
1690 timer(d)
1698 timer(d)
1691 fm.end()
1699 fm.end()
1692
1700
1693
1701
1694 @command(b'perf::changeset|perfchangeset', formatteropts)
1702 @command(b'perf::changeset|perfchangeset', formatteropts)
1695 def perfchangeset(ui, repo, rev, **opts):
1703 def perfchangeset(ui, repo, rev, **opts):
1696 opts = _byteskwargs(opts)
1704 opts = _byteskwargs(opts)
1697 timer, fm = gettimer(ui, opts)
1705 timer, fm = gettimer(ui, opts)
1698 n = scmutil.revsingle(repo, rev).node()
1706 n = scmutil.revsingle(repo, rev).node()
1699
1707
1700 def d():
1708 def d():
1701 repo.changelog.read(n)
1709 repo.changelog.read(n)
1702 # repo.changelog._cache = None
1710 # repo.changelog._cache = None
1703
1711
1704 timer(d)
1712 timer(d)
1705 fm.end()
1713 fm.end()
1706
1714
1707
1715
1708 @command(b'perf::ignore|perfignore', formatteropts)
1716 @command(b'perf::ignore|perfignore', formatteropts)
1709 def perfignore(ui, repo, **opts):
1717 def perfignore(ui, repo, **opts):
1710 """benchmark operations related to computing the ignore rules"""
1718 """benchmark operations related to computing the ignore rules"""
1711 opts = _byteskwargs(opts)
1719 opts = _byteskwargs(opts)
1712 timer, fm = gettimer(ui, opts)
1720 timer, fm = gettimer(ui, opts)
1713 dirstate = repo.dirstate
1721 dirstate = repo.dirstate
1714
1722
1715 def setupone():
1723 def setupone():
1716 dirstate.invalidate()
1724 dirstate.invalidate()
1717 clearfilecache(dirstate, b'_ignore')
1725 clearfilecache(dirstate, b'_ignore')
1718
1726
1719 def runone():
1727 def runone():
1720 dirstate._ignore
1728 dirstate._ignore
1721
1729
1722 timer(runone, setup=setupone, title=b"load")
1730 timer(runone, setup=setupone, title=b"load")
1723 fm.end()
1731 fm.end()
1724
1732
1725
1733
1726 @command(
1734 @command(
1727 b'perf::index|perfindex',
1735 b'perf::index|perfindex',
1728 [
1736 [
1729 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1737 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1730 (b'', b'no-lookup', None, b'do not perform a revision lookup after creation'),
1738 (b'', b'no-lookup', None, b'do not perform a revision lookup after creation'),
1731 ]
1739 ]
1732 + formatteropts,
1740 + formatteropts,
1733 )
1741 )
1734 def perfindex(ui, repo, **opts):
1742 def perfindex(ui, repo, **opts):
1735 """benchmark index creation time followed by a lookup
1743 """benchmark index creation time followed by a lookup
1736
1744
1737 The default is to look `tip` up. Depending on the index implementation,
1745 The default is to look `tip` up. Depending on the index implementation,
1738 the revision looked up can matter. For example, an implementation
1746 the revision looked up can matter. For example, an implementation
1739 scanning the index will have a faster lookup time for `--rev tip` than for
1747 scanning the index will have a faster lookup time for `--rev tip` than for
1740 `--rev 0`. The number of looked up revisions and their order can also
1748 `--rev 0`. The number of looked up revisions and their order can also
1741 matter.
1749 matter.
1742
1750
1743 Examples of useful sets to test:
1751 Examples of useful sets to test:
1744
1752
1745 * tip
1753 * tip
1746 * 0
1754 * 0
1747 * -10:
1755 * -10:
1748 * :10
1756 * :10
1749 * -10: + :10
1757 * -10: + :10
1750 * :10: + -10:
1758 * :10: + -10:
1751 * -10000:
1759 * -10000:
1752 * -10000: + 0
1760 * -10000: + 0
1753
1761
1754 It is not currently possible to check for lookup of a missing node. For
1762 It is not currently possible to check for lookup of a missing node. For
1755 deeper lookup benchmarking, check out the `perfnodemap` command."""
1763 deeper lookup benchmarking, check out the `perfnodemap` command."""
1756 import mercurial.revlog
1764 import mercurial.revlog
1757
1765
1758 opts = _byteskwargs(opts)
1766 opts = _byteskwargs(opts)
1759 timer, fm = gettimer(ui, opts)
1767 timer, fm = gettimer(ui, opts)
1760 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1768 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1761 if opts[b'no_lookup']:
1769 if opts[b'no_lookup']:
1762 if opts['rev']:
1770 if opts['rev']:
1763 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1771 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1764 nodes = []
1772 nodes = []
1765 elif not opts[b'rev']:
1773 elif not opts[b'rev']:
1766 nodes = [repo[b"tip"].node()]
1774 nodes = [repo[b"tip"].node()]
1767 else:
1775 else:
1768 revs = scmutil.revrange(repo, opts[b'rev'])
1776 revs = scmutil.revrange(repo, opts[b'rev'])
1769 cl = repo.changelog
1777 cl = repo.changelog
1770 nodes = [cl.node(r) for r in revs]
1778 nodes = [cl.node(r) for r in revs]
1771
1779
1772 unfi = repo.unfiltered()
1780 unfi = repo.unfiltered()
1773 # find the filecache func directly
1781 # find the filecache func directly
1774 # This avoids polluting the benchmark with the filecache logic
1782 # This avoids polluting the benchmark with the filecache logic
1775 makecl = unfi.__class__.changelog.func
1783 makecl = unfi.__class__.changelog.func
1776
1784
1777 def setup():
1785 def setup():
1778 # probably not necessary, but for good measure
1786 # probably not necessary, but for good measure
1779 clearchangelog(unfi)
1787 clearchangelog(unfi)
1780
1788
1781 def d():
1789 def d():
1782 cl = makecl(unfi)
1790 cl = makecl(unfi)
1783 for n in nodes:
1791 for n in nodes:
1784 cl.rev(n)
1792 cl.rev(n)
1785
1793
1786 timer(d, setup=setup)
1794 timer(d, setup=setup)
1787 fm.end()
1795 fm.end()
1788
1796
1789
1797
1790 @command(
1798 @command(
1791 b'perf::nodemap|perfnodemap',
1799 b'perf::nodemap|perfnodemap',
1792 [
1800 [
1793 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1801 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1794 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1802 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1795 ]
1803 ]
1796 + formatteropts,
1804 + formatteropts,
1797 )
1805 )
1798 def perfnodemap(ui, repo, **opts):
1806 def perfnodemap(ui, repo, **opts):
1799 """benchmark the time necessary to look up revisions from a cold nodemap
1807 """benchmark the time necessary to look up revisions from a cold nodemap
1800
1808
1801 Depending on the implementation, the number and order of revisions we look
1809 Depending on the implementation, the number and order of revisions we look
1802 up can vary. Examples of useful sets to test:
1810 up can vary. Examples of useful sets to test:
1803 * tip
1811 * tip
1804 * 0
1812 * 0
1805 * -10:
1813 * -10:
1806 * :10
1814 * :10
1807 * -10: + :10
1815 * -10: + :10
1808 * :10: + -10:
1816 * :10: + -10:
1809 * -10000:
1817 * -10000:
1810 * -10000: + 0
1818 * -10000: + 0
1811
1819
1812 The command currently focuses on valid binary lookup. Benchmarking for
1820 The command currently focuses on valid binary lookup. Benchmarking for
1813 hexlookup, prefix lookup and missing lookup would also be valuable.
1821 hexlookup, prefix lookup and missing lookup would also be valuable.
1814 """
1822 """
1815 import mercurial.revlog
1823 import mercurial.revlog
1816
1824
1817 opts = _byteskwargs(opts)
1825 opts = _byteskwargs(opts)
1818 timer, fm = gettimer(ui, opts)
1826 timer, fm = gettimer(ui, opts)
1819 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1827 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1820
1828
1821 unfi = repo.unfiltered()
1829 unfi = repo.unfiltered()
1822 clearcaches = opts[b'clear_caches']
1830 clearcaches = opts[b'clear_caches']
1823 # find the filecache func directly
1831 # find the filecache func directly
1824 # This avoids polluting the benchmark with the filecache logic
1832 # This avoids polluting the benchmark with the filecache logic
1825 makecl = unfi.__class__.changelog.func
1833 makecl = unfi.__class__.changelog.func
1826 if not opts[b'rev']:
1834 if not opts[b'rev']:
1827 raise error.Abort(b'use --rev to specify revisions to look up')
1835 raise error.Abort(b'use --rev to specify revisions to look up')
1828 revs = scmutil.revrange(repo, opts[b'rev'])
1836 revs = scmutil.revrange(repo, opts[b'rev'])
1829 cl = repo.changelog
1837 cl = repo.changelog
1830 nodes = [cl.node(r) for r in revs]
1838 nodes = [cl.node(r) for r in revs]
1831
1839
1832 # use a list to pass reference to a nodemap from one closure to the next
1840 # use a list to pass reference to a nodemap from one closure to the next
1833 nodeget = [None]
1841 nodeget = [None]
1834
1842
1835 def setnodeget():
1843 def setnodeget():
1836 # probably not necessary, but for good measure
1844 # probably not necessary, but for good measure
1837 clearchangelog(unfi)
1845 clearchangelog(unfi)
1838 cl = makecl(unfi)
1846 cl = makecl(unfi)
1839 if util.safehasattr(cl.index, 'get_rev'):
1847 if util.safehasattr(cl.index, 'get_rev'):
1840 nodeget[0] = cl.index.get_rev
1848 nodeget[0] = cl.index.get_rev
1841 else:
1849 else:
1842 nodeget[0] = cl.nodemap.get
1850 nodeget[0] = cl.nodemap.get
1843
1851
1844 def d():
1852 def d():
1845 get = nodeget[0]
1853 get = nodeget[0]
1846 for n in nodes:
1854 for n in nodes:
1847 get(n)
1855 get(n)
1848
1856
1849 setup = None
1857 setup = None
1850 if clearcaches:
1858 if clearcaches:
1851
1859
1852 def setup():
1860 def setup():
1853 setnodeget()
1861 setnodeget()
1854
1862
1855 else:
1863 else:
1856 setnodeget()
1864 setnodeget()
1857 d() # prewarm the data structure
1865 d() # prewarm the data structure
1858 timer(d, setup=setup)
1866 timer(d, setup=setup)
1859 fm.end()
1867 fm.end()
1860
1868
1861
1869
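# `nodeget = [None]` above is a small compatibility trick: a one-element list
# acts as a mutable cell, letting setnodeget() hand a freshly bound method to
# d() without `nonlocal` (which the old Python versions this file targets do
# not have). Below is a standalone sketch of the same pattern; all names are
# illustrative.
def _example_closure_cell():
    """Share state between a setup closure and a timed closure through a
    one-element list, as perfnodemap does with `nodeget`."""
    lookup = [None]  # the "cell"
    table = {}

    def setup():
        table.clear()
        table.update((n, n * 2) for n in range(1000))
        lookup[0] = table.get  # rebind the method on the rebuilt table

    def run():
        get = lookup[0]
        for n in range(1000):
            get(n)

    setup()
    run()
    return lookup[0](42)  # 84, via the method stored in the cell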
1862 @command(b'perf::startup|perfstartup', formatteropts)
1870 @command(b'perf::startup|perfstartup', formatteropts)
1863 def perfstartup(ui, repo, **opts):
1871 def perfstartup(ui, repo, **opts):
1864 opts = _byteskwargs(opts)
1872 opts = _byteskwargs(opts)
1865 timer, fm = gettimer(ui, opts)
1873 timer, fm = gettimer(ui, opts)
1866
1874
1867 def d():
1875 def d():
1868 if os.name != 'nt':
1876 if os.name != 'nt':
1869 os.system(
1877 os.system(
1870 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1878 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1871 )
1879 )
1872 else:
1880 else:
1873 os.environ['HGRCPATH'] = r' '
1881 os.environ['HGRCPATH'] = r' '
1874 os.system("%s version -q > NUL" % sys.argv[0])
1882 os.system("%s version -q > NUL" % sys.argv[0])
1875
1883
1876 timer(d)
1884 timer(d)
1877 fm.end()
1885 fm.end()
1878
1886
1879
1887
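# perfstartup shells out so every run pays the full interpreter and extension
# start-up cost. Below is a standalone sketch of the same idea with
# `subprocess`, discarding output and blanking HGRCPATH so user configuration
# does not skew the result; it assumes an `hg` executable is on PATH.
def _example_time_cold_start(executable='hg'):
    """Time one cold `hg version -q` invocation, as perfstartup does."""
    import os
    import subprocess
    import time

    env = dict(os.environ)
    env['HGRCPATH'] = ''  # ignore user and system hgrc files
    start = time.perf_counter()
    subprocess.run(
        [executable, 'version', '-q'],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        env=env,
        check=True,
    )
    return time.perf_counter() - start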
1880 @command(b'perf::parents|perfparents', formatteropts)
1888 @command(b'perf::parents|perfparents', formatteropts)
1881 def perfparents(ui, repo, **opts):
1889 def perfparents(ui, repo, **opts):
1882 """benchmark the time necessary to fetch one changeset's parents.
1890 """benchmark the time necessary to fetch one changeset's parents.
1883
1891
1884 The fetch is done using the `node identifier`, traversing all object layers
1892 The fetch is done using the `node identifier`, traversing all object layers
1885 from the repository object. The first N revisions will be used for this
1893 from the repository object. The first N revisions will be used for this
1886 benchmark. N is controlled by the ``perf.parentscount`` config option
1894 benchmark. N is controlled by the ``perf.parentscount`` config option
1887 (default: 1000).
1895 (default: 1000).
1888 """
1896 """
1889 opts = _byteskwargs(opts)
1897 opts = _byteskwargs(opts)
1890 timer, fm = gettimer(ui, opts)
1898 timer, fm = gettimer(ui, opts)
1891 # control the number of commits perfparents iterates over
1899 # control the number of commits perfparents iterates over
1892 # experimental config: perf.parentscount
1900 # experimental config: perf.parentscount
1893 count = getint(ui, b"perf", b"parentscount", 1000)
1901 count = getint(ui, b"perf", b"parentscount", 1000)
1894 if len(repo.changelog) < count:
1902 if len(repo.changelog) < count:
1895 raise error.Abort(b"repo needs %d commits for this test" % count)
1903 raise error.Abort(b"repo needs %d commits for this test" % count)
1896 repo = repo.unfiltered()
1904 repo = repo.unfiltered()
1897 nl = [repo.changelog.node(i) for i in _xrange(count)]
1905 nl = [repo.changelog.node(i) for i in _xrange(count)]
1898
1906
1899 def d():
1907 def d():
1900 for n in nl:
1908 for n in nl:
1901 repo.changelog.parents(n)
1909 repo.changelog.parents(n)
1902
1910
1903 timer(d)
1911 timer(d)
1904 fm.end()
1912 fm.end()
1905
1913
1906
1914
1907 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1915 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1908 def perfctxfiles(ui, repo, x, **opts):
1916 def perfctxfiles(ui, repo, x, **opts):
1909 opts = _byteskwargs(opts)
1917 opts = _byteskwargs(opts)
1910 x = int(x)
1918 x = int(x)
1911 timer, fm = gettimer(ui, opts)
1919 timer, fm = gettimer(ui, opts)
1912
1920
1913 def d():
1921 def d():
1914 len(repo[x].files())
1922 len(repo[x].files())
1915
1923
1916 timer(d)
1924 timer(d)
1917 fm.end()
1925 fm.end()
1918
1926
1919
1927
1920 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1928 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1921 def perfrawfiles(ui, repo, x, **opts):
1929 def perfrawfiles(ui, repo, x, **opts):
1922 opts = _byteskwargs(opts)
1930 opts = _byteskwargs(opts)
1923 x = int(x)
1931 x = int(x)
1924 timer, fm = gettimer(ui, opts)
1932 timer, fm = gettimer(ui, opts)
1925 cl = repo.changelog
1933 cl = repo.changelog
1926
1934
1927 def d():
1935 def d():
1928 len(cl.read(x)[3])
1936 len(cl.read(x)[3])
1929
1937
1930 timer(d)
1938 timer(d)
1931 fm.end()
1939 fm.end()
1932
1940
1933
1941
1934 @command(b'perf::lookup|perflookup', formatteropts)
1942 @command(b'perf::lookup|perflookup', formatteropts)
1935 def perflookup(ui, repo, rev, **opts):
1943 def perflookup(ui, repo, rev, **opts):
1936 opts = _byteskwargs(opts)
1944 opts = _byteskwargs(opts)
1937 timer, fm = gettimer(ui, opts)
1945 timer, fm = gettimer(ui, opts)
1938 timer(lambda: len(repo.lookup(rev)))
1946 timer(lambda: len(repo.lookup(rev)))
1939 fm.end()
1947 fm.end()
1940
1948
1941
1949
1942 @command(
1950 @command(
1943 b'perf::linelogedits|perflinelogedits',
1951 b'perf::linelogedits|perflinelogedits',
1944 [
1952 [
1945 (b'n', b'edits', 10000, b'number of edits'),
1953 (b'n', b'edits', 10000, b'number of edits'),
1946 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1954 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1947 ],
1955 ],
1948 norepo=True,
1956 norepo=True,
1949 )
1957 )
1950 def perflinelogedits(ui, **opts):
1958 def perflinelogedits(ui, **opts):
1951 from mercurial import linelog
1959 from mercurial import linelog
1952
1960
1953 opts = _byteskwargs(opts)
1961 opts = _byteskwargs(opts)
1954
1962
1955 edits = opts[b'edits']
1963 edits = opts[b'edits']
1956 maxhunklines = opts[b'max_hunk_lines']
1964 maxhunklines = opts[b'max_hunk_lines']
1957
1965
1958 maxb1 = 100000
1966 maxb1 = 100000
1959 random.seed(0)
1967 random.seed(0)
1960 randint = random.randint
1968 randint = random.randint
1961 currentlines = 0
1969 currentlines = 0
1962 arglist = []
1970 arglist = []
1963 for rev in _xrange(edits):
1971 for rev in _xrange(edits):
1964 a1 = randint(0, currentlines)
1972 a1 = randint(0, currentlines)
1965 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1973 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1966 b1 = randint(0, maxb1)
1974 b1 = randint(0, maxb1)
1967 b2 = randint(b1, b1 + maxhunklines)
1975 b2 = randint(b1, b1 + maxhunklines)
1968 currentlines += (b2 - b1) - (a2 - a1)
1976 currentlines += (b2 - b1) - (a2 - a1)
1969 arglist.append((rev, a1, a2, b1, b2))
1977 arglist.append((rev, a1, a2, b1, b2))
1970
1978
1971 def d():
1979 def d():
1972 ll = linelog.linelog()
1980 ll = linelog.linelog()
1973 for args in arglist:
1981 for args in arglist:
1974 ll.replacelines(*args)
1982 ll.replacelines(*args)
1975
1983
1976 timer, fm = gettimer(ui, opts)
1984 timer, fm = gettimer(ui, opts)
1977 timer(d)
1985 timer(d)
1978 fm.end()
1986 fm.end()
1979
1987
1980
1988
1981 @command(b'perf::revrange|perfrevrange', formatteropts)
1989 @command(b'perf::revrange|perfrevrange', formatteropts)
1982 def perfrevrange(ui, repo, *specs, **opts):
1990 def perfrevrange(ui, repo, *specs, **opts):
1983 opts = _byteskwargs(opts)
1991 opts = _byteskwargs(opts)
1984 timer, fm = gettimer(ui, opts)
1992 timer, fm = gettimer(ui, opts)
1985 revrange = scmutil.revrange
1993 revrange = scmutil.revrange
1986 timer(lambda: len(revrange(repo, specs)))
1994 timer(lambda: len(revrange(repo, specs)))
1987 fm.end()
1995 fm.end()
1988
1996
1989
1997
1990 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
1998 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
1991 def perfnodelookup(ui, repo, rev, **opts):
1999 def perfnodelookup(ui, repo, rev, **opts):
1992 opts = _byteskwargs(opts)
2000 opts = _byteskwargs(opts)
1993 timer, fm = gettimer(ui, opts)
2001 timer, fm = gettimer(ui, opts)
1994 import mercurial.revlog
2002 import mercurial.revlog
1995
2003
1996 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2004 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1997 n = scmutil.revsingle(repo, rev).node()
2005 n = scmutil.revsingle(repo, rev).node()
1998
2006
1999 try:
2007 try:
2000 cl = revlog(getsvfs(repo), radix=b"00changelog")
2008 cl = revlog(getsvfs(repo), radix=b"00changelog")
2001 except TypeError:
2009 except TypeError:
2002 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2010 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2003
2011
2004 def d():
2012 def d():
2005 cl.rev(n)
2013 cl.rev(n)
2006 clearcaches(cl)
2014 clearcaches(cl)
2007
2015
2008 timer(d)
2016 timer(d)
2009 fm.end()
2017 fm.end()
2010
2018
2011
2019
2012 @command(
2020 @command(
2013 b'perf::log|perflog',
2021 b'perf::log|perflog',
2014 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2022 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2015 )
2023 )
2016 def perflog(ui, repo, rev=None, **opts):
2024 def perflog(ui, repo, rev=None, **opts):
2017 opts = _byteskwargs(opts)
2025 opts = _byteskwargs(opts)
2018 if rev is None:
2026 if rev is None:
2019 rev = []
2027 rev = []
2020 timer, fm = gettimer(ui, opts)
2028 timer, fm = gettimer(ui, opts)
2021 ui.pushbuffer()
2029 ui.pushbuffer()
2022 timer(
2030 timer(
2023 lambda: commands.log(
2031 lambda: commands.log(
2024 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2032 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2025 )
2033 )
2026 )
2034 )
2027 ui.popbuffer()
2035 ui.popbuffer()
2028 fm.end()
2036 fm.end()
2029
2037
2030
2038
2031 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2039 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2032 def perfmoonwalk(ui, repo, **opts):
2040 def perfmoonwalk(ui, repo, **opts):
2033 """benchmark walking the changelog backwards
2041 """benchmark walking the changelog backwards
2034
2042
2035 This also loads the changelog data for each revision in the changelog.
2043 This also loads the changelog data for each revision in the changelog.
2036 """
2044 """
2037 opts = _byteskwargs(opts)
2045 opts = _byteskwargs(opts)
2038 timer, fm = gettimer(ui, opts)
2046 timer, fm = gettimer(ui, opts)
2039
2047
2040 def moonwalk():
2048 def moonwalk():
2041 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2049 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2042 ctx = repo[i]
2050 ctx = repo[i]
2043 ctx.branch() # read changelog data (in addition to the index)
2051 ctx.branch() # read changelog data (in addition to the index)
2044
2052
2045 timer(moonwalk)
2053 timer(moonwalk)
2046 fm.end()
2054 fm.end()
2047
2055
2048
2056
2049 @command(
2057 @command(
2050 b'perf::templating|perftemplating',
2058 b'perf::templating|perftemplating',
2051 [
2059 [
2052 (b'r', b'rev', [], b'revisions to run the template on'),
2060 (b'r', b'rev', [], b'revisions to run the template on'),
2053 ]
2061 ]
2054 + formatteropts,
2062 + formatteropts,
2055 )
2063 )
2056 def perftemplating(ui, repo, testedtemplate=None, **opts):
2064 def perftemplating(ui, repo, testedtemplate=None, **opts):
2057 """test the rendering time of a given template"""
2065 """test the rendering time of a given template"""
2058 if makelogtemplater is None:
2066 if makelogtemplater is None:
2059 raise error.Abort(
2067 raise error.Abort(
2060 b"perftemplating not available with this Mercurial",
2068 b"perftemplating not available with this Mercurial",
2061 hint=b"use 4.3 or later",
2069 hint=b"use 4.3 or later",
2062 )
2070 )
2063
2071
2064 opts = _byteskwargs(opts)
2072 opts = _byteskwargs(opts)
2065
2073
2066 nullui = ui.copy()
2074 nullui = ui.copy()
2067 nullui.fout = open(os.devnull, 'wb')
2075 nullui.fout = open(os.devnull, 'wb')
2068 nullui.disablepager()
2076 nullui.disablepager()
2069 revs = opts.get(b'rev')
2077 revs = opts.get(b'rev')
2070 if not revs:
2078 if not revs:
2071 revs = [b'all()']
2079 revs = [b'all()']
2072 revs = list(scmutil.revrange(repo, revs))
2080 revs = list(scmutil.revrange(repo, revs))
2073
2081
2074 defaulttemplate = (
2082 defaulttemplate = (
2075 b'{date|shortdate} [{rev}:{node|short}]'
2083 b'{date|shortdate} [{rev}:{node|short}]'
2076 b' {author|person}: {desc|firstline}\n'
2084 b' {author|person}: {desc|firstline}\n'
2077 )
2085 )
2078 if testedtemplate is None:
2086 if testedtemplate is None:
2079 testedtemplate = defaulttemplate
2087 testedtemplate = defaulttemplate
2080 displayer = makelogtemplater(nullui, repo, testedtemplate)
2088 displayer = makelogtemplater(nullui, repo, testedtemplate)
2081
2089
2082 def format():
2090 def format():
2083 for r in revs:
2091 for r in revs:
2084 ctx = repo[r]
2092 ctx = repo[r]
2085 displayer.show(ctx)
2093 displayer.show(ctx)
2086 displayer.flush(ctx)
2094 displayer.flush(ctx)
2087
2095
2088 timer, fm = gettimer(ui, opts)
2096 timer, fm = gettimer(ui, opts)
2089 timer(format)
2097 timer(format)
2090 fm.end()
2098 fm.end()
2091
2099
2092
2100
2093 def _displaystats(ui, opts, entries, data):
2101 def _displaystats(ui, opts, entries, data):
2094 # use a second formatter because the data are quite different, not sure
2102 # use a second formatter because the data are quite different, not sure
2095 # how it flies with the templater.
2103 # how it flies with the templater.
2096 fm = ui.formatter(b'perf-stats', opts)
2104 fm = ui.formatter(b'perf-stats', opts)
2097 for key, title in entries:
2105 for key, title in entries:
2098 values = data[key]
2106 values = data[key]
2099 nbvalues = len(values)
2107 nbvalues = len(values)
2100 values.sort()
2108 values.sort()
2101 stats = {
2109 stats = {
2102 'key': key,
2110 'key': key,
2103 'title': title,
2111 'title': title,
2104 'nbitems': len(values),
2112 'nbitems': len(values),
2105 'min': values[0][0],
2113 'min': values[0][0],
2106 '10%': values[(nbvalues * 10) // 100][0],
2114 '10%': values[(nbvalues * 10) // 100][0],
2107 '25%': values[(nbvalues * 25) // 100][0],
2115 '25%': values[(nbvalues * 25) // 100][0],
2108 '50%': values[(nbvalues * 50) // 100][0],
2116 '50%': values[(nbvalues * 50) // 100][0],
2109 '75%': values[(nbvalues * 75) // 100][0],
2117 '75%': values[(nbvalues * 75) // 100][0],
2110 '80%': values[(nbvalues * 80) // 100][0],
2118 '80%': values[(nbvalues * 80) // 100][0],
2111 '85%': values[(nbvalues * 85) // 100][0],
2119 '85%': values[(nbvalues * 85) // 100][0],
2112 '90%': values[(nbvalues * 90) // 100][0],
2120 '90%': values[(nbvalues * 90) // 100][0],
2113 '95%': values[(nbvalues * 95) // 100][0],
2121 '95%': values[(nbvalues * 95) // 100][0],
2114 '99%': values[(nbvalues * 99) // 100][0],
2122 '99%': values[(nbvalues * 99) // 100][0],
2115 'max': values[-1][0],
2123 'max': values[-1][0],
2116 }
2124 }
2117 fm.startitem()
2125 fm.startitem()
2118 fm.data(**stats)
2126 fm.data(**stats)
2119 # human-readable rendering of the same statistics
2127 # human-readable rendering of the same statistics
2120 fm.plain('### %s (%d items)\n' % (title, len(values)))
2128 fm.plain('### %s (%d items)\n' % (title, len(values)))
2121 lines = [
2129 lines = [
2122 'min',
2130 'min',
2123 '10%',
2131 '10%',
2124 '25%',
2132 '25%',
2125 '50%',
2133 '50%',
2126 '75%',
2134 '75%',
2127 '80%',
2135 '80%',
2128 '85%',
2136 '85%',
2129 '90%',
2137 '90%',
2130 '95%',
2138 '95%',
2131 '99%',
2139 '99%',
2132 'max',
2140 'max',
2133 ]
2141 ]
2134 for l in lines:
2142 for l in lines:
2135 fm.plain('%s: %s\n' % (l, stats[l]))
2143 fm.plain('%s: %s\n' % (l, stats[l]))
2136 fm.end()
2144 fm.end()
2137
2145
2138
2146
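# A minimal sketch (not part of perf.py) of the percentile selection performed
# in _displaystats above: each reported row is simply the entry found at an
# integer index into the sorted list of measurements.
def _percentile_sketch(sorted_values, pct):
    """Return the value at roughly the pct-th percentile of a sorted list."""
    idx = min((len(sorted_values) * pct) // 100, len(sorted_values) - 1)
    return sorted_values[idx]

# e.g. _percentile_sketch(sorted(samples), 90) picks the sample below which
# about 90% of the measurements fall.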
2139 @command(
2147 @command(
2140 b'perf::helper-mergecopies|perfhelper-mergecopies',
2148 b'perf::helper-mergecopies|perfhelper-mergecopies',
2141 formatteropts
2149 formatteropts
2142 + [
2150 + [
2143 (b'r', b'revs', [], b'restrict search to these revisions'),
2151 (b'r', b'revs', [], b'restrict search to these revisions'),
2144 (b'', b'timing', False, b'provides extra data (costly)'),
2152 (b'', b'timing', False, b'provides extra data (costly)'),
2145 (b'', b'stats', False, b'provides statistics about the measured data'),
2153 (b'', b'stats', False, b'provides statistics about the measured data'),
2146 ],
2154 ],
2147 )
2155 )
2148 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2156 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2149 """find statistics about potential parameters for `perfmergecopies`
2157 """find statistics about potential parameters for `perfmergecopies`
2150
2158
2151 This command finds (base, p1, p2) triplets relevant for copytracing
2159 This command finds (base, p1, p2) triplets relevant for copytracing
2152 benchmarking in the context of a merge. It reports values for some of the
2160 benchmarking in the context of a merge. It reports values for some of the
2153 parameters that impact merge copy tracing time during merge.
2161 parameters that impact merge copy tracing time during merge.
2154
2162
2155 If `--timing` is set, rename detection is run and the associated timing
2163 If `--timing` is set, rename detection is run and the associated timing
2156 will be reported. The extra details come at the cost of slower command
2164 will be reported. The extra details come at the cost of slower command
2157 execution.
2165 execution.
2158
2166
2159 Since rename detection is only run once, other factors might easily
2167 Since rename detection is only run once, other factors might easily
2160 affect the precision of the timing. However it should give a good
2168 affect the precision of the timing. However it should give a good
2161 approximation of which revision triplets are very costly.
2169 approximation of which revision triplets are very costly.
2162 """
2170 """
2163 opts = _byteskwargs(opts)
2171 opts = _byteskwargs(opts)
2164 fm = ui.formatter(b'perf', opts)
2172 fm = ui.formatter(b'perf', opts)
2165 dotiming = opts[b'timing']
2173 dotiming = opts[b'timing']
2166 dostats = opts[b'stats']
2174 dostats = opts[b'stats']
2167
2175
2168 output_template = [
2176 output_template = [
2169 ("base", "%(base)12s"),
2177 ("base", "%(base)12s"),
2170 ("p1", "%(p1.node)12s"),
2178 ("p1", "%(p1.node)12s"),
2171 ("p2", "%(p2.node)12s"),
2179 ("p2", "%(p2.node)12s"),
2172 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2180 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2173 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2181 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2174 ("p1.renames", "%(p1.renamedfiles)12d"),
2182 ("p1.renames", "%(p1.renamedfiles)12d"),
2175 ("p1.time", "%(p1.time)12.3f"),
2183 ("p1.time", "%(p1.time)12.3f"),
2176 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2184 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2177 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2185 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2178 ("p2.renames", "%(p2.renamedfiles)12d"),
2186 ("p2.renames", "%(p2.renamedfiles)12d"),
2179 ("p2.time", "%(p2.time)12.3f"),
2187 ("p2.time", "%(p2.time)12.3f"),
2180 ("renames", "%(nbrenamedfiles)12d"),
2188 ("renames", "%(nbrenamedfiles)12d"),
2181 ("total.time", "%(time)12.3f"),
2189 ("total.time", "%(time)12.3f"),
2182 ]
2190 ]
2183 if not dotiming:
2191 if not dotiming:
2184 output_template = [
2192 output_template = [
2185 i
2193 i
2186 for i in output_template
2194 for i in output_template
2187 if not ('time' in i[0] or 'renames' in i[0])
2195 if not ('time' in i[0] or 'renames' in i[0])
2188 ]
2196 ]
2189 header_names = [h for (h, v) in output_template]
2197 header_names = [h for (h, v) in output_template]
2190 output = ' '.join([v for (h, v) in output_template]) + '\n'
2198 output = ' '.join([v for (h, v) in output_template]) + '\n'
2191 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2199 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2192 fm.plain(header % tuple(header_names))
2200 fm.plain(header % tuple(header_names))
2193
2201
2194 if not revs:
2202 if not revs:
2195 revs = ['all()']
2203 revs = ['all()']
2196 revs = scmutil.revrange(repo, revs)
2204 revs = scmutil.revrange(repo, revs)
2197
2205
2198 if dostats:
2206 if dostats:
2199 alldata = {
2207 alldata = {
2200 'nbrevs': [],
2208 'nbrevs': [],
2201 'nbmissingfiles': [],
2209 'nbmissingfiles': [],
2202 }
2210 }
2203 if dotiming:
2211 if dotiming:
2204 alldata['parentnbrenames'] = []
2212 alldata['parentnbrenames'] = []
2205 alldata['totalnbrenames'] = []
2213 alldata['totalnbrenames'] = []
2206 alldata['parenttime'] = []
2214 alldata['parenttime'] = []
2207 alldata['totaltime'] = []
2215 alldata['totaltime'] = []
2208
2216
2209 roi = repo.revs('merge() and %ld', revs)
2217 roi = repo.revs('merge() and %ld', revs)
2210 for r in roi:
2218 for r in roi:
2211 ctx = repo[r]
2219 ctx = repo[r]
2212 p1 = ctx.p1()
2220 p1 = ctx.p1()
2213 p2 = ctx.p2()
2221 p2 = ctx.p2()
2214 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2222 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2215 for b in bases:
2223 for b in bases:
2216 b = repo[b]
2224 b = repo[b]
2217 p1missing = copies._computeforwardmissing(b, p1)
2225 p1missing = copies._computeforwardmissing(b, p1)
2218 p2missing = copies._computeforwardmissing(b, p2)
2226 p2missing = copies._computeforwardmissing(b, p2)
2219 data = {
2227 data = {
2220 b'base': b.hex(),
2228 b'base': b.hex(),
2221 b'p1.node': p1.hex(),
2229 b'p1.node': p1.hex(),
2222 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2230 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2223 b'p1.nbmissingfiles': len(p1missing),
2231 b'p1.nbmissingfiles': len(p1missing),
2224 b'p2.node': p2.hex(),
2232 b'p2.node': p2.hex(),
2225 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2233 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2226 b'p2.nbmissingfiles': len(p2missing),
2234 b'p2.nbmissingfiles': len(p2missing),
2227 }
2235 }
2228 if dostats:
2236 if dostats:
2229 if p1missing:
2237 if p1missing:
2230 alldata['nbrevs'].append(
2238 alldata['nbrevs'].append(
2231 (data['p1.nbrevs'], b.hex(), p1.hex())
2239 (data['p1.nbrevs'], b.hex(), p1.hex())
2232 )
2240 )
2233 alldata['nbmissingfiles'].append(
2241 alldata['nbmissingfiles'].append(
2234 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2242 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2235 )
2243 )
2236 if p2missing:
2244 if p2missing:
2237 alldata['nbrevs'].append(
2245 alldata['nbrevs'].append(
2238 (data['p2.nbrevs'], b.hex(), p2.hex())
2246 (data['p2.nbrevs'], b.hex(), p2.hex())
2239 )
2247 )
2240 alldata['nbmissingfiles'].append(
2248 alldata['nbmissingfiles'].append(
2241 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2249 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2242 )
2250 )
2243 if dotiming:
2251 if dotiming:
2244 begin = util.timer()
2252 begin = util.timer()
2245 mergedata = copies.mergecopies(repo, p1, p2, b)
2253 mergedata = copies.mergecopies(repo, p1, p2, b)
2246 end = util.timer()
2254 end = util.timer()
2247 # not very stable timing since we did only one run
2255 # not very stable timing since we did only one run
2248 data['time'] = end - begin
2256 data['time'] = end - begin
2249 # mergedata contains five dicts: "copy", "movewithdir",
2257 # mergedata contains five dicts: "copy", "movewithdir",
2250 # "diverge", "renamedelete" and "dirmove".
2258 # "diverge", "renamedelete" and "dirmove".
2251 # The first 4 are about renamed files, so let's count them.
2259 # The first 4 are about renamed files, so let's count them.
2252 renames = len(mergedata[0])
2260 renames = len(mergedata[0])
2253 renames += len(mergedata[1])
2261 renames += len(mergedata[1])
2254 renames += len(mergedata[2])
2262 renames += len(mergedata[2])
2255 renames += len(mergedata[3])
2263 renames += len(mergedata[3])
2256 data['nbrenamedfiles'] = renames
2264 data['nbrenamedfiles'] = renames
2257 begin = util.timer()
2265 begin = util.timer()
2258 p1renames = copies.pathcopies(b, p1)
2266 p1renames = copies.pathcopies(b, p1)
2259 end = util.timer()
2267 end = util.timer()
2260 data['p1.time'] = end - begin
2268 data['p1.time'] = end - begin
2261 begin = util.timer()
2269 begin = util.timer()
2262 p2renames = copies.pathcopies(b, p2)
2270 p2renames = copies.pathcopies(b, p2)
2263 end = util.timer()
2271 end = util.timer()
2264 data['p2.time'] = end - begin
2272 data['p2.time'] = end - begin
2265 data['p1.renamedfiles'] = len(p1renames)
2273 data['p1.renamedfiles'] = len(p1renames)
2266 data['p2.renamedfiles'] = len(p2renames)
2274 data['p2.renamedfiles'] = len(p2renames)
2267
2275
2268 if dostats:
2276 if dostats:
2269 if p1missing:
2277 if p1missing:
2270 alldata['parentnbrenames'].append(
2278 alldata['parentnbrenames'].append(
2271 (data['p1.renamedfiles'], b.hex(), p1.hex())
2279 (data['p1.renamedfiles'], b.hex(), p1.hex())
2272 )
2280 )
2273 alldata['parenttime'].append(
2281 alldata['parenttime'].append(
2274 (data['p1.time'], b.hex(), p1.hex())
2282 (data['p1.time'], b.hex(), p1.hex())
2275 )
2283 )
2276 if p2missing:
2284 if p2missing:
2277 alldata['parentnbrenames'].append(
2285 alldata['parentnbrenames'].append(
2278 (data['p2.renamedfiles'], b.hex(), p2.hex())
2286 (data['p2.renamedfiles'], b.hex(), p2.hex())
2279 )
2287 )
2280 alldata['parenttime'].append(
2288 alldata['parenttime'].append(
2281 (data['p2.time'], b.hex(), p2.hex())
2289 (data['p2.time'], b.hex(), p2.hex())
2282 )
2290 )
2283 if p1missing or p2missing:
2291 if p1missing or p2missing:
2284 alldata['totalnbrenames'].append(
2292 alldata['totalnbrenames'].append(
2285 (
2293 (
2286 data['nbrenamedfiles'],
2294 data['nbrenamedfiles'],
2287 b.hex(),
2295 b.hex(),
2288 p1.hex(),
2296 p1.hex(),
2289 p2.hex(),
2297 p2.hex(),
2290 )
2298 )
2291 )
2299 )
2292 alldata['totaltime'].append(
2300 alldata['totaltime'].append(
2293 (data['time'], b.hex(), p1.hex(), p2.hex())
2301 (data['time'], b.hex(), p1.hex(), p2.hex())
2294 )
2302 )
2295 fm.startitem()
2303 fm.startitem()
2296 fm.data(**data)
2304 fm.data(**data)
2297 # make node pretty for the human output
2305 # make node pretty for the human output
2298 out = data.copy()
2306 out = data.copy()
2299 out['base'] = fm.hexfunc(b.node())
2307 out['base'] = fm.hexfunc(b.node())
2300 out['p1.node'] = fm.hexfunc(p1.node())
2308 out['p1.node'] = fm.hexfunc(p1.node())
2301 out['p2.node'] = fm.hexfunc(p2.node())
2309 out['p2.node'] = fm.hexfunc(p2.node())
2302 fm.plain(output % out)
2310 fm.plain(output % out)
2303
2311
2304 fm.end()
2312 fm.end()
2305 if dostats:
2313 if dostats:
2306 # use a second formatter because the data are quite different, not sure
2314 # use a second formatter because the data are quite different, not sure
2307 # how it flies with the templater.
2315 # how it flies with the templater.
2308 entries = [
2316 entries = [
2309 ('nbrevs', 'number of revisions covered'),
2317 ('nbrevs', 'number of revisions covered'),
2310 ('nbmissingfiles', 'number of missing files at head'),
2318 ('nbmissingfiles', 'number of missing files at head'),
2311 ]
2319 ]
2312 if dotiming:
2320 if dotiming:
2313 entries.append(
2321 entries.append(
2314 ('parentnbrenames', 'rename from one parent to base')
2322 ('parentnbrenames', 'rename from one parent to base')
2315 )
2323 )
2316 entries.append(('totalnbrenames', 'total number of renames'))
2324 entries.append(('totalnbrenames', 'total number of renames'))
2317 entries.append(('parenttime', 'time for one parent'))
2325 entries.append(('parenttime', 'time for one parent'))
2318 entries.append(('totaltime', 'time for both parents'))
2326 entries.append(('totaltime', 'time for both parents'))
2319 _displaystats(ui, opts, entries, alldata)
2327 _displaystats(ui, opts, entries, alldata)
2320
2328
2321
2329
2322 @command(
2330 @command(
2323 b'perf::helper-pathcopies|perfhelper-pathcopies',
2331 b'perf::helper-pathcopies|perfhelper-pathcopies',
2324 formatteropts
2332 formatteropts
2325 + [
2333 + [
2326 (b'r', b'revs', [], b'restrict search to these revisions'),
2334 (b'r', b'revs', [], b'restrict search to these revisions'),
2327 (b'', b'timing', False, b'provides extra data (costly)'),
2335 (b'', b'timing', False, b'provides extra data (costly)'),
2328 (b'', b'stats', False, b'provides statistics about the measured data'),
2336 (b'', b'stats', False, b'provides statistics about the measured data'),
2329 ],
2337 ],
2330 )
2338 )
2331 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2339 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2332 """find statistic about potential parameters for the `perftracecopies`
2340 """find statistic about potential parameters for the `perftracecopies`
2333
2341
2334 This command find source-destination pair relevant for copytracing testing.
2342 This command find source-destination pair relevant for copytracing testing.
2335 It report value for some of the parameters that impact copy tracing time.
2343 It report value for some of the parameters that impact copy tracing time.
2336
2344
2337 If `--timing` is set, rename detection is run and the associated timing
2345 If `--timing` is set, rename detection is run and the associated timing
2338 will be reported. The extra details come at the cost of slower command
2346 will be reported. The extra details come at the cost of slower command
2339 execution.
2347 execution.
2340
2348
2341 Since the rename detection is only run once, other factors might easily
2349 Since the rename detection is only run once, other factors might easily
2342 affect the precision of the timing. However it should give a good
2350 affect the precision of the timing. However it should give a good
2343 approximation of which revision pairs are very costly.
2351 approximation of which revision pairs are very costly.
2344 """
2352 """
2345 opts = _byteskwargs(opts)
2353 opts = _byteskwargs(opts)
2346 fm = ui.formatter(b'perf', opts)
2354 fm = ui.formatter(b'perf', opts)
2347 dotiming = opts[b'timing']
2355 dotiming = opts[b'timing']
2348 dostats = opts[b'stats']
2356 dostats = opts[b'stats']
2349
2357
2350 if dotiming:
2358 if dotiming:
2351 header = '%12s %12s %12s %12s %12s %12s\n'
2359 header = '%12s %12s %12s %12s %12s %12s\n'
2352 output = (
2360 output = (
2353 "%(source)12s %(destination)12s "
2361 "%(source)12s %(destination)12s "
2354 "%(nbrevs)12d %(nbmissingfiles)12d "
2362 "%(nbrevs)12d %(nbmissingfiles)12d "
2355 "%(nbrenamedfiles)12d %(time)18.5f\n"
2363 "%(nbrenamedfiles)12d %(time)18.5f\n"
2356 )
2364 )
2357 header_names = (
2365 header_names = (
2358 "source",
2366 "source",
2359 "destination",
2367 "destination",
2360 "nb-revs",
2368 "nb-revs",
2361 "nb-files",
2369 "nb-files",
2362 "nb-renames",
2370 "nb-renames",
2363 "time",
2371 "time",
2364 )
2372 )
2365 fm.plain(header % header_names)
2373 fm.plain(header % header_names)
2366 else:
2374 else:
2367 header = '%12s %12s %12s %12s\n'
2375 header = '%12s %12s %12s %12s\n'
2368 output = (
2376 output = (
2369 "%(source)12s %(destination)12s "
2377 "%(source)12s %(destination)12s "
2370 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2378 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2371 )
2379 )
2372 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2380 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2373
2381
2374 if not revs:
2382 if not revs:
2375 revs = ['all()']
2383 revs = ['all()']
2376 revs = scmutil.revrange(repo, revs)
2384 revs = scmutil.revrange(repo, revs)
2377
2385
2378 if dostats:
2386 if dostats:
2379 alldata = {
2387 alldata = {
2380 'nbrevs': [],
2388 'nbrevs': [],
2381 'nbmissingfiles': [],
2389 'nbmissingfiles': [],
2382 }
2390 }
2383 if dotiming:
2391 if dotiming:
2384 alldata['nbrenames'] = []
2392 alldata['nbrenames'] = []
2385 alldata['time'] = []
2393 alldata['time'] = []
2386
2394
2387 roi = repo.revs('merge() and %ld', revs)
2395 roi = repo.revs('merge() and %ld', revs)
2388 for r in roi:
2396 for r in roi:
2389 ctx = repo[r]
2397 ctx = repo[r]
2390 p1 = ctx.p1().rev()
2398 p1 = ctx.p1().rev()
2391 p2 = ctx.p2().rev()
2399 p2 = ctx.p2().rev()
2392 bases = repo.changelog._commonancestorsheads(p1, p2)
2400 bases = repo.changelog._commonancestorsheads(p1, p2)
2393 for p in (p1, p2):
2401 for p in (p1, p2):
2394 for b in bases:
2402 for b in bases:
2395 base = repo[b]
2403 base = repo[b]
2396 parent = repo[p]
2404 parent = repo[p]
2397 missing = copies._computeforwardmissing(base, parent)
2405 missing = copies._computeforwardmissing(base, parent)
2398 if not missing:
2406 if not missing:
2399 continue
2407 continue
2400 data = {
2408 data = {
2401 b'source': base.hex(),
2409 b'source': base.hex(),
2402 b'destination': parent.hex(),
2410 b'destination': parent.hex(),
2403 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2411 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2404 b'nbmissingfiles': len(missing),
2412 b'nbmissingfiles': len(missing),
2405 }
2413 }
2406 if dostats:
2414 if dostats:
2407 alldata['nbrevs'].append(
2415 alldata['nbrevs'].append(
2408 (
2416 (
2409 data['nbrevs'],
2417 data['nbrevs'],
2410 base.hex(),
2418 base.hex(),
2411 parent.hex(),
2419 parent.hex(),
2412 )
2420 )
2413 )
2421 )
2414 alldata['nbmissingfiles'].append(
2422 alldata['nbmissingfiles'].append(
2415 (
2423 (
2416 data['nbmissingfiles'],
2424 data['nbmissingfiles'],
2417 base.hex(),
2425 base.hex(),
2418 parent.hex(),
2426 parent.hex(),
2419 )
2427 )
2420 )
2428 )
2421 if dotiming:
2429 if dotiming:
2422 begin = util.timer()
2430 begin = util.timer()
2423 renames = copies.pathcopies(base, parent)
2431 renames = copies.pathcopies(base, parent)
2424 end = util.timer()
2432 end = util.timer()
2425 # not very stable timing since we did only one run
2433 # not very stable timing since we did only one run
2426 data['time'] = end - begin
2434 data['time'] = end - begin
2427 data['nbrenamedfiles'] = len(renames)
2435 data['nbrenamedfiles'] = len(renames)
2428 if dostats:
2436 if dostats:
2429 alldata['time'].append(
2437 alldata['time'].append(
2430 (
2438 (
2431 data['time'],
2439 data['time'],
2432 base.hex(),
2440 base.hex(),
2433 parent.hex(),
2441 parent.hex(),
2434 )
2442 )
2435 )
2443 )
2436 alldata['nbrenames'].append(
2444 alldata['nbrenames'].append(
2437 (
2445 (
2438 data['nbrenamedfiles'],
2446 data['nbrenamedfiles'],
2439 base.hex(),
2447 base.hex(),
2440 parent.hex(),
2448 parent.hex(),
2441 )
2449 )
2442 )
2450 )
2443 fm.startitem()
2451 fm.startitem()
2444 fm.data(**data)
2452 fm.data(**data)
2445 out = data.copy()
2453 out = data.copy()
2446 out['source'] = fm.hexfunc(base.node())
2454 out['source'] = fm.hexfunc(base.node())
2447 out['destination'] = fm.hexfunc(parent.node())
2455 out['destination'] = fm.hexfunc(parent.node())
2448 fm.plain(output % out)
2456 fm.plain(output % out)
2449
2457
2450 fm.end()
2458 fm.end()
2451 if dostats:
2459 if dostats:
2452 entries = [
2460 entries = [
2453 ('nbrevs', 'number of revisions covered'),
2461 ('nbrevs', 'number of revisions covered'),
2454 ('nbmissingfiles', 'number of missing files at head'),
2462 ('nbmissingfiles', 'number of missing files at head'),
2455 ]
2463 ]
2456 if dotiming:
2464 if dotiming:
2457 entries.append(('nbrenames', 'renamed files'))
2465 entries.append(('nbrenames', 'renamed files'))
2458 entries.append(('time', 'time'))
2466 entries.append(('time', 'time'))
2459 _displaystats(ui, opts, entries, alldata)
2467 _displaystats(ui, opts, entries, alldata)
2460
2468
2461
2469
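# Benchmark constructing a case-collision auditor over the dirstate (the check
# used to catch FOO/foo style filename collisions on case-insensitive systems).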
2462 @command(b'perf::cca|perfcca', formatteropts)
2470 @command(b'perf::cca|perfcca', formatteropts)
2463 def perfcca(ui, repo, **opts):
2471 def perfcca(ui, repo, **opts):
2464 opts = _byteskwargs(opts)
2472 opts = _byteskwargs(opts)
2465 timer, fm = gettimer(ui, opts)
2473 timer, fm = gettimer(ui, opts)
2466 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2474 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2467 fm.end()
2475 fm.end()
2468
2476
2469
2477
2470 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2478 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2471 def perffncacheload(ui, repo, **opts):
2479 def perffncacheload(ui, repo, **opts):
2472 opts = _byteskwargs(opts)
2480 opts = _byteskwargs(opts)
2473 timer, fm = gettimer(ui, opts)
2481 timer, fm = gettimer(ui, opts)
2474 s = repo.store
2482 s = repo.store
2475
2483
2476 def d():
2484 def d():
2477 s.fncache._load()
2485 s.fncache._load()
2478
2486
2479 timer(d)
2487 timer(d)
2480 fm.end()
2488 fm.end()
2481
2489
2482
2490
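# Benchmark writing the fncache back to disk: the store lock and a backup
# transaction are taken once outside the timed section, and every timed run
# marks the cache dirty before calling write().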
2483 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2491 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2484 def perffncachewrite(ui, repo, **opts):
2492 def perffncachewrite(ui, repo, **opts):
2485 opts = _byteskwargs(opts)
2493 opts = _byteskwargs(opts)
2486 timer, fm = gettimer(ui, opts)
2494 timer, fm = gettimer(ui, opts)
2487 s = repo.store
2495 s = repo.store
2488 lock = repo.lock()
2496 lock = repo.lock()
2489 s.fncache._load()
2497 s.fncache._load()
2490 tr = repo.transaction(b'perffncachewrite')
2498 tr = repo.transaction(b'perffncachewrite')
2491 tr.addbackup(b'fncache')
2499 tr.addbackup(b'fncache')
2492
2500
2493 def d():
2501 def d():
2494 s.fncache._dirty = True
2502 s.fncache._dirty = True
2495 s.fncache.write(tr)
2503 s.fncache.write(tr)
2496
2504
2497 timer(d)
2505 timer(d)
2498 tr.close()
2506 tr.close()
2499 lock.release()
2507 lock.release()
2500 fm.end()
2508 fm.end()
2501
2509
2502
2510
2503 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2511 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2504 def perffncacheencode(ui, repo, **opts):
2512 def perffncacheencode(ui, repo, **opts):
2505 opts = _byteskwargs(opts)
2513 opts = _byteskwargs(opts)
2506 timer, fm = gettimer(ui, opts)
2514 timer, fm = gettimer(ui, opts)
2507 s = repo.store
2515 s = repo.store
2508 s.fncache._load()
2516 s.fncache._load()
2509
2517
2510 def d():
2518 def d():
2511 for p in s.fncache.entries:
2519 for p in s.fncache.entries:
2512 s.encode(p)
2520 s.encode(p)
2513
2521
2514 timer(d)
2522 timer(d)
2515 fm.end()
2523 fm.end()
2516
2524
2517
2525
2518 def _bdiffworker(q, blocks, xdiff, ready, done):
2526 def _bdiffworker(q, blocks, xdiff, ready, done):
2519 while not done.is_set():
2527 while not done.is_set():
2520 pair = q.get()
2528 pair = q.get()
2521 while pair is not None:
2529 while pair is not None:
2522 if xdiff:
2530 if xdiff:
2523 mdiff.bdiff.xdiffblocks(*pair)
2531 mdiff.bdiff.xdiffblocks(*pair)
2524 elif blocks:
2532 elif blocks:
2525 mdiff.bdiff.blocks(*pair)
2533 mdiff.bdiff.blocks(*pair)
2526 else:
2534 else:
2527 mdiff.textdiff(*pair)
2535 mdiff.textdiff(*pair)
2528 q.task_done()
2536 q.task_done()
2529 pair = q.get()
2537 pair = q.get()
2530 q.task_done() # for the None one
2538 q.task_done() # for the None one
2531 with ready:
2539 with ready:
2532 ready.wait()
2540 ready.wait()
2533
2541
2534
2542
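# Self-contained sketch (plain Python, no Mercurial imports) of the worker
# scheme used by _bdiffworker above: workers drain the queue until they see a
# None sentinel, and a shared Event tells them when to exit for good. Unlike
# the original, this simplified version shuts the pool down with a second
# round of sentinels instead of a Condition.
import queue
import threading


def _worker_sketch(q, done, process):
    while not done.is_set():
        item = q.get()
        while item is not None:
            process(item)
            q.task_done()
            item = q.get()
        q.task_done()  # account for the terminating None


def _run_batch_sketch(items, process, nthreads=4):
    q = queue.Queue()
    done = threading.Event()
    workers = [
        threading.Thread(target=_worker_sketch, args=(q, done, process))
        for _ in range(nthreads)
    ]
    for w in workers:
        w.start()
    for item in items:
        q.put(item)
    for _ in workers:
        q.put(None)
    q.join()  # every item (and sentinel) has been processed
    done.set()
    for _ in workers:
        q.put(None)  # wake blocked workers so they can observe `done`
    for w in workers:
        w.join()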
2535 def _manifestrevision(repo, mnode):
2543 def _manifestrevision(repo, mnode):
2536 ml = repo.manifestlog
2544 ml = repo.manifestlog
2537
2545
2538 if util.safehasattr(ml, b'getstorage'):
2546 if util.safehasattr(ml, b'getstorage'):
2539 store = ml.getstorage(b'')
2547 store = ml.getstorage(b'')
2540 else:
2548 else:
2541 store = ml._revlog
2549 store = ml._revlog
2542
2550
2543 return store.revision(mnode)
2551 return store.revision(mnode)
2544
2552
2545
2553
2546 @command(
2554 @command(
2547 b'perf::bdiff|perfbdiff',
2555 b'perf::bdiff|perfbdiff',
2548 revlogopts
2556 revlogopts
2549 + formatteropts
2557 + formatteropts
2550 + [
2558 + [
2551 (
2559 (
2552 b'',
2560 b'',
2553 b'count',
2561 b'count',
2554 1,
2562 1,
2555 b'number of revisions to test (when using --startrev)',
2563 b'number of revisions to test (when using --startrev)',
2556 ),
2564 ),
2557 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2565 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2558 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
2566 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
2559 (b'', b'blocks', False, b'test computing diffs into blocks'),
2567 (b'', b'blocks', False, b'test computing diffs into blocks'),
2560 (b'', b'xdiff', False, b'use xdiff algorithm'),
2568 (b'', b'xdiff', False, b'use xdiff algorithm'),
2561 ],
2569 ],
2562 b'-c|-m|FILE REV',
2570 b'-c|-m|FILE REV',
2563 )
2571 )
2564 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2572 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2565 """benchmark a bdiff between revisions
2573 """benchmark a bdiff between revisions
2566
2574
2567 By default, benchmark a bdiff between the requested revision and its delta parent.
2575 By default, benchmark a bdiff between the requested revision and its delta parent.
2568
2576
2569 With ``--count``, benchmark bdiffs between delta parents and self for N
2577 With ``--count``, benchmark bdiffs between delta parents and self for N
2570 revisions starting at the specified revision.
2578 revisions starting at the specified revision.
2571
2579
2572 With ``--alldata``, assume the requested revision is a changeset and
2580 With ``--alldata``, assume the requested revision is a changeset and
2573 measure bdiffs for all changes related to that changeset (manifest
2581 measure bdiffs for all changes related to that changeset (manifest
2574 and filelogs).
2582 and filelogs).
2575 """
2583 """
2576 opts = _byteskwargs(opts)
2584 opts = _byteskwargs(opts)
2577
2585
2578 if opts[b'xdiff'] and not opts[b'blocks']:
2586 if opts[b'xdiff'] and not opts[b'blocks']:
2579 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2587 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2580
2588
2581 if opts[b'alldata']:
2589 if opts[b'alldata']:
2582 opts[b'changelog'] = True
2590 opts[b'changelog'] = True
2583
2591
2584 if opts.get(b'changelog') or opts.get(b'manifest'):
2592 if opts.get(b'changelog') or opts.get(b'manifest'):
2585 file_, rev = None, file_
2593 file_, rev = None, file_
2586 elif rev is None:
2594 elif rev is None:
2587 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2595 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2588
2596
2589 blocks = opts[b'blocks']
2597 blocks = opts[b'blocks']
2590 xdiff = opts[b'xdiff']
2598 xdiff = opts[b'xdiff']
2591 textpairs = []
2599 textpairs = []
2592
2600
2593 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2601 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2594
2602
2595 startrev = r.rev(r.lookup(rev))
2603 startrev = r.rev(r.lookup(rev))
2596 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2604 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2597 if opts[b'alldata']:
2605 if opts[b'alldata']:
2598 # Load revisions associated with changeset.
2606 # Load revisions associated with changeset.
2599 ctx = repo[rev]
2607 ctx = repo[rev]
2600 mtext = _manifestrevision(repo, ctx.manifestnode())
2608 mtext = _manifestrevision(repo, ctx.manifestnode())
2601 for pctx in ctx.parents():
2609 for pctx in ctx.parents():
2602 pman = _manifestrevision(repo, pctx.manifestnode())
2610 pman = _manifestrevision(repo, pctx.manifestnode())
2603 textpairs.append((pman, mtext))
2611 textpairs.append((pman, mtext))
2604
2612
2605 # Load filelog revisions by iterating manifest delta.
2613 # Load filelog revisions by iterating manifest delta.
2606 man = ctx.manifest()
2614 man = ctx.manifest()
2607 pman = ctx.p1().manifest()
2615 pman = ctx.p1().manifest()
2608 for filename, change in pman.diff(man).items():
2616 for filename, change in pman.diff(man).items():
2609 fctx = repo.file(filename)
2617 fctx = repo.file(filename)
2610 f1 = fctx.revision(change[0][0] or -1)
2618 f1 = fctx.revision(change[0][0] or -1)
2611 f2 = fctx.revision(change[1][0] or -1)
2619 f2 = fctx.revision(change[1][0] or -1)
2612 textpairs.append((f1, f2))
2620 textpairs.append((f1, f2))
2613 else:
2621 else:
2614 dp = r.deltaparent(rev)
2622 dp = r.deltaparent(rev)
2615 textpairs.append((r.revision(dp), r.revision(rev)))
2623 textpairs.append((r.revision(dp), r.revision(rev)))
2616
2624
2617 withthreads = threads > 0
2625 withthreads = threads > 0
2618 if not withthreads:
2626 if not withthreads:
2619
2627
2620 def d():
2628 def d():
2621 for pair in textpairs:
2629 for pair in textpairs:
2622 if xdiff:
2630 if xdiff:
2623 mdiff.bdiff.xdiffblocks(*pair)
2631 mdiff.bdiff.xdiffblocks(*pair)
2624 elif blocks:
2632 elif blocks:
2625 mdiff.bdiff.blocks(*pair)
2633 mdiff.bdiff.blocks(*pair)
2626 else:
2634 else:
2627 mdiff.textdiff(*pair)
2635 mdiff.textdiff(*pair)
2628
2636
2629 else:
2637 else:
2630 q = queue()
2638 q = queue()
2631 for i in _xrange(threads):
2639 for i in _xrange(threads):
2632 q.put(None)
2640 q.put(None)
2633 ready = threading.Condition()
2641 ready = threading.Condition()
2634 done = threading.Event()
2642 done = threading.Event()
2635 for i in _xrange(threads):
2643 for i in _xrange(threads):
2636 threading.Thread(
2644 threading.Thread(
2637 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2645 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2638 ).start()
2646 ).start()
2639 q.join()
2647 q.join()
2640
2648
2641 def d():
2649 def d():
2642 for pair in textpairs:
2650 for pair in textpairs:
2643 q.put(pair)
2651 q.put(pair)
2644 for i in _xrange(threads):
2652 for i in _xrange(threads):
2645 q.put(None)
2653 q.put(None)
2646 with ready:
2654 with ready:
2647 ready.notify_all()
2655 ready.notify_all()
2648 q.join()
2656 q.join()
2649
2657
2650 timer, fm = gettimer(ui, opts)
2658 timer, fm = gettimer(ui, opts)
2651 timer(d)
2659 timer(d)
2652 fm.end()
2660 fm.end()
2653
2661
2654 if withthreads:
2662 if withthreads:
2655 done.set()
2663 done.set()
2656 for i in _xrange(threads):
2664 for i in _xrange(threads):
2657 q.put(None)
2665 q.put(None)
2658 with ready:
2666 with ready:
2659 ready.notify_all()
2667 ready.notify_all()
2660
2668
2661
2669
2662 @command(
2670 @command(
2663 b'perf::unbundle',
2671 b'perf::unbundle',
2664 formatteropts,
2672 formatteropts,
2665 b'BUNDLE_FILE',
2673 b'BUNDLE_FILE',
2666 )
2674 )
2667 def perf_unbundle(ui, repo, fname, **opts):
2675 def perf_unbundle(ui, repo, fname, **opts):
2668 """benchmark application of a bundle in a repository.
2676 """benchmark application of a bundle in a repository.
2669
2677
2670 This does not include the final transaction processing"""
2678 This does not include the final transaction processing"""
2671 from mercurial import exchange
2679 from mercurial import exchange
2672 from mercurial import bundle2
2680 from mercurial import bundle2
2673
2681
2674 opts = _byteskwargs(opts)
2682 opts = _byteskwargs(opts)
2675
2683
2676 with repo.lock():
2684 with repo.lock():
2677 bundle = [None, None]
2685 bundle = [None, None]
2678 orig_quiet = repo.ui.quiet
2686 orig_quiet = repo.ui.quiet
2679 try:
2687 try:
2680 repo.ui.quiet = True
2688 repo.ui.quiet = True
2681 with open(fname, mode="rb") as f:
2689 with open(fname, mode="rb") as f:
2682
2690
2683 def noop_report(*args, **kwargs):
2691 def noop_report(*args, **kwargs):
2684 pass
2692 pass
2685
2693
2686 def setup():
2694 def setup():
2687 gen, tr = bundle
2695 gen, tr = bundle
2688 if tr is not None:
2696 if tr is not None:
2689 tr.abort()
2697 tr.abort()
2690 bundle[:] = [None, None]
2698 bundle[:] = [None, None]
2691 f.seek(0)
2699 f.seek(0)
2692 bundle[0] = exchange.readbundle(ui, f, fname)
2700 bundle[0] = exchange.readbundle(ui, f, fname)
2693 bundle[1] = repo.transaction(b'perf::unbundle')
2701 bundle[1] = repo.transaction(b'perf::unbundle')
2694 bundle[1]._report = noop_report # silence the transaction
2702 bundle[1]._report = noop_report # silence the transaction
2695
2703
2696 def apply():
2704 def apply():
2697 gen, tr = bundle
2705 gen, tr = bundle
2698 bundle2.applybundle(
2706 bundle2.applybundle(
2699 repo,
2707 repo,
2700 gen,
2708 gen,
2701 tr,
2709 tr,
2702 source=b'perf::unbundle',
2710 source=b'perf::unbundle',
2703 url=fname,
2711 url=fname,
2704 )
2712 )
2705
2713
2706 timer, fm = gettimer(ui, opts)
2714 timer, fm = gettimer(ui, opts)
2707 timer(apply, setup=setup)
2715 timer(apply, setup=setup)
2708 fm.end()
2716 fm.end()
2709 finally:
2717 finally:
2710 repo.ui.quiet = orig_quiet # restore the original quiet level
2718 repo.ui.quiet = orig_quiet # restore the original quiet level
2711 gen, tr = bundle
2719 gen, tr = bundle
2712 if tr is not None:
2720 if tr is not None:
2713 tr.abort()
2721 tr.abort()
2714
2722
2715
2723
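# Generic sketch (plain Python, not the perf.py timer API) of the pattern used
# above: the expensive per-run re-initialisation happens in a `setup` callback
# and is excluded from the measured time of `work`.
import time


def _bench_sketch(work, setup, runs=3):
    timings = []
    for _ in range(runs):
        setup()  # re-create the state consumed by work(); not timed
        start = time.perf_counter()
        work()  # only this section is measured
        timings.append(time.perf_counter() - start)
    return min(timings)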
2716 @command(
2724 @command(
2717 b'perf::unidiff|perfunidiff',
2725 b'perf::unidiff|perfunidiff',
2718 revlogopts
2726 revlogopts
2719 + formatteropts
2727 + formatteropts
2720 + [
2728 + [
2721 (
2729 (
2722 b'',
2730 b'',
2723 b'count',
2731 b'count',
2724 1,
2732 1,
2725 b'number of revisions to test (when using --startrev)',
2733 b'number of revisions to test (when using --startrev)',
2726 ),
2734 ),
2727 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2735 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2728 ],
2736 ],
2729 b'-c|-m|FILE REV',
2737 b'-c|-m|FILE REV',
2730 )
2738 )
2731 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2739 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2732 """benchmark a unified diff between revisions
2740 """benchmark a unified diff between revisions
2733
2741
2734 This doesn't include any copy tracing - it's just a unified diff
2742 This doesn't include any copy tracing - it's just a unified diff
2735 of the texts.
2743 of the texts.
2736
2744
2737 By default, benchmark a diff between the requested revision and its delta parent.
2745 By default, benchmark a diff between the requested revision and its delta parent.
2738
2746
2739 With ``--count``, benchmark diffs between delta parents and self for N
2747 With ``--count``, benchmark diffs between delta parents and self for N
2740 revisions starting at the specified revision.
2748 revisions starting at the specified revision.
2741
2749
2742 With ``--alldata``, assume the requested revision is a changeset and
2750 With ``--alldata``, assume the requested revision is a changeset and
2743 measure diffs for all changes related to that changeset (manifest
2751 measure diffs for all changes related to that changeset (manifest
2744 and filelogs).
2752 and filelogs).
2745 """
2753 """
2746 opts = _byteskwargs(opts)
2754 opts = _byteskwargs(opts)
2747 if opts[b'alldata']:
2755 if opts[b'alldata']:
2748 opts[b'changelog'] = True
2756 opts[b'changelog'] = True
2749
2757
2750 if opts.get(b'changelog') or opts.get(b'manifest'):
2758 if opts.get(b'changelog') or opts.get(b'manifest'):
2751 file_, rev = None, file_
2759 file_, rev = None, file_
2752 elif rev is None:
2760 elif rev is None:
2753 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2761 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2754
2762
2755 textpairs = []
2763 textpairs = []
2756
2764
2757 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2765 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2758
2766
2759 startrev = r.rev(r.lookup(rev))
2767 startrev = r.rev(r.lookup(rev))
2760 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2768 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2761 if opts[b'alldata']:
2769 if opts[b'alldata']:
2762 # Load revisions associated with changeset.
2770 # Load revisions associated with changeset.
2763 ctx = repo[rev]
2771 ctx = repo[rev]
2764 mtext = _manifestrevision(repo, ctx.manifestnode())
2772 mtext = _manifestrevision(repo, ctx.manifestnode())
2765 for pctx in ctx.parents():
2773 for pctx in ctx.parents():
2766 pman = _manifestrevision(repo, pctx.manifestnode())
2774 pman = _manifestrevision(repo, pctx.manifestnode())
2767 textpairs.append((pman, mtext))
2775 textpairs.append((pman, mtext))
2768
2776
2769 # Load filelog revisions by iterating manifest delta.
2777 # Load filelog revisions by iterating manifest delta.
2770 man = ctx.manifest()
2778 man = ctx.manifest()
2771 pman = ctx.p1().manifest()
2779 pman = ctx.p1().manifest()
2772 for filename, change in pman.diff(man).items():
2780 for filename, change in pman.diff(man).items():
2773 fctx = repo.file(filename)
2781 fctx = repo.file(filename)
2774 f1 = fctx.revision(change[0][0] or -1)
2782 f1 = fctx.revision(change[0][0] or -1)
2775 f2 = fctx.revision(change[1][0] or -1)
2783 f2 = fctx.revision(change[1][0] or -1)
2776 textpairs.append((f1, f2))
2784 textpairs.append((f1, f2))
2777 else:
2785 else:
2778 dp = r.deltaparent(rev)
2786 dp = r.deltaparent(rev)
2779 textpairs.append((r.revision(dp), r.revision(rev)))
2787 textpairs.append((r.revision(dp), r.revision(rev)))
2780
2788
2781 def d():
2789 def d():
2782 for left, right in textpairs:
2790 for left, right in textpairs:
2783 # The date strings don't matter, so we pass empty strings.
2791 # The date strings don't matter, so we pass empty strings.
2784 headerlines, hunks = mdiff.unidiff(
2792 headerlines, hunks = mdiff.unidiff(
2785 left, b'', right, b'', b'left', b'right', binary=False
2793 left, b'', right, b'', b'left', b'right', binary=False
2786 )
2794 )
2787 # consume iterators in roughly the way patch.py does
2795 # consume iterators in roughly the way patch.py does
2788 b'\n'.join(headerlines)
2796 b'\n'.join(headerlines)
2789 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2797 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2790
2798
2791 timer, fm = gettimer(ui, opts)
2799 timer, fm = gettimer(ui, opts)
2792 timer(d)
2800 timer(d)
2793 fm.end()
2801 fm.end()
2794
2802
2795
2803
2796 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2804 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2797 def perfdiffwd(ui, repo, **opts):
2805 def perfdiffwd(ui, repo, **opts):
2798 """Profile diff of working directory changes"""
2806 """Profile diff of working directory changes"""
2799 opts = _byteskwargs(opts)
2807 opts = _byteskwargs(opts)
2800 timer, fm = gettimer(ui, opts)
2808 timer, fm = gettimer(ui, opts)
2801 options = {
2809 options = {
2802 'w': 'ignore_all_space',
2810 'w': 'ignore_all_space',
2803 'b': 'ignore_space_change',
2811 'b': 'ignore_space_change',
2804 'B': 'ignore_blank_lines',
2812 'B': 'ignore_blank_lines',
2805 }
2813 }
2806
2814
2807 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2815 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2808 opts = {options[c]: b'1' for c in diffopt}
2816 opts = {options[c]: b'1' for c in diffopt}
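        # e.g. for diffopt == 'wB' the comprehension above builds
        # {'ignore_all_space': b'1', 'ignore_blank_lines': b'1'}, which is then
        # passed to commands.diff() as keyword arguments.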
2809
2817
2810 def d():
2818 def d():
2811 ui.pushbuffer()
2819 ui.pushbuffer()
2812 commands.diff(ui, repo, **opts)
2820 commands.diff(ui, repo, **opts)
2813 ui.popbuffer()
2821 ui.popbuffer()
2814
2822
2815 diffopt = diffopt.encode('ascii')
2823 diffopt = diffopt.encode('ascii')
2816 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2824 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2817 timer(d, title=title)
2825 timer(d, title=title)
2818 fm.end()
2826 fm.end()
2819
2827
2820
2828
2821 @command(
2829 @command(
2822 b'perf::revlogindex|perfrevlogindex',
2830 b'perf::revlogindex|perfrevlogindex',
2823 revlogopts + formatteropts,
2831 revlogopts + formatteropts,
2824 b'-c|-m|FILE',
2832 b'-c|-m|FILE',
2825 )
2833 )
2826 def perfrevlogindex(ui, repo, file_=None, **opts):
2834 def perfrevlogindex(ui, repo, file_=None, **opts):
2827 """Benchmark operations against a revlog index.
2835 """Benchmark operations against a revlog index.
2828
2836
2829 This tests constructing a revlog instance, reading index data,
2837 This tests constructing a revlog instance, reading index data,
2830 parsing index data, and performing various operations related to
2838 parsing index data, and performing various operations related to
2831 index data.
2839 index data.
2832 """
2840 """
2833
2841
2834 opts = _byteskwargs(opts)
2842 opts = _byteskwargs(opts)
2835
2843
2836 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2844 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2837
2845
2838 opener = getattr(rl, 'opener') # trick linter
2846 opener = getattr(rl, 'opener') # trick linter
2839 # compat with hg <= 5.8
2847 # compat with hg <= 5.8
2840 radix = getattr(rl, 'radix', None)
2848 radix = getattr(rl, 'radix', None)
2841 indexfile = getattr(rl, '_indexfile', None)
2849 indexfile = getattr(rl, '_indexfile', None)
2842 if indexfile is None:
2850 if indexfile is None:
2843 # compatibility with <= hg-5.8
2851 # compatibility with <= hg-5.8
2844 indexfile = getattr(rl, 'indexfile')
2852 indexfile = getattr(rl, 'indexfile')
2845 data = opener.read(indexfile)
2853 data = opener.read(indexfile)
2846
2854
2847 header = struct.unpack(b'>I', data[0:4])[0]
2855 header = struct.unpack(b'>I', data[0:4])[0]
2848 version = header & 0xFFFF
2856 version = header & 0xFFFF
2849 if version == 1:
2857 if version == 1:
2850 inline = header & (1 << 16)
2858 inline = header & (1 << 16)
2851 else:
2859 else:
2852 raise error.Abort(b'unsupported revlog version: %d' % version)
2860 raise error.Abort(b'unsupported revlog version: %d' % version)
2853
2861
2854 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2862 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2855 if parse_index_v1 is None:
2863 if parse_index_v1 is None:
2856 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2864 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2857
2865
2858 rllen = len(rl)
2866 rllen = len(rl)
2859
2867
2860 node0 = rl.node(0)
2868 node0 = rl.node(0)
2861 node25 = rl.node(rllen // 4)
2869 node25 = rl.node(rllen // 4)
2862 node50 = rl.node(rllen // 2)
2870 node50 = rl.node(rllen // 2)
2863 node75 = rl.node(rllen // 4 * 3)
2871 node75 = rl.node(rllen // 4 * 3)
2864 node100 = rl.node(rllen - 1)
2872 node100 = rl.node(rllen - 1)
2865
2873
2866 allrevs = range(rllen)
2874 allrevs = range(rllen)
2867 allrevsrev = list(reversed(allrevs))
2875 allrevsrev = list(reversed(allrevs))
2868 allnodes = [rl.node(rev) for rev in range(rllen)]
2876 allnodes = [rl.node(rev) for rev in range(rllen)]
2869 allnodesrev = list(reversed(allnodes))
2877 allnodesrev = list(reversed(allnodes))
2870
2878
2871 def constructor():
2879 def constructor():
2872 if radix is not None:
2880 if radix is not None:
2873 revlog(opener, radix=radix)
2881 revlog(opener, radix=radix)
2874 else:
2882 else:
2875 # hg <= 5.8
2883 # hg <= 5.8
2876 revlog(opener, indexfile=indexfile)
2884 revlog(opener, indexfile=indexfile)
2877
2885
2878 def read():
2886 def read():
2879 with opener(indexfile) as fh:
2887 with opener(indexfile) as fh:
2880 fh.read()
2888 fh.read()
2881
2889
2882 def parseindex():
2890 def parseindex():
2883 parse_index_v1(data, inline)
2891 parse_index_v1(data, inline)
2884
2892
2885 def getentry(revornode):
2893 def getentry(revornode):
2886 index = parse_index_v1(data, inline)[0]
2894 index = parse_index_v1(data, inline)[0]
2887 index[revornode]
2895 index[revornode]
2888
2896
2889 def getentries(revs, count=1):
2897 def getentries(revs, count=1):
2890 index = parse_index_v1(data, inline)[0]
2898 index = parse_index_v1(data, inline)[0]
2891
2899
2892 for i in range(count):
2900 for i in range(count):
2893 for rev in revs:
2901 for rev in revs:
2894 index[rev]
2902 index[rev]
2895
2903
2896 def resolvenode(node):
2904 def resolvenode(node):
2897 index = parse_index_v1(data, inline)[0]
2905 index = parse_index_v1(data, inline)[0]
2898 rev = getattr(index, 'rev', None)
2906 rev = getattr(index, 'rev', None)
2899 if rev is None:
2907 if rev is None:
2900 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2908 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2901 # This only works for the C code.
2909 # This only works for the C code.
2902 if nodemap is None:
2910 if nodemap is None:
2903 return
2911 return
2904 rev = nodemap.__getitem__
2912 rev = nodemap.__getitem__
2905
2913
2906 try:
2914 try:
2907 rev(node)
2915 rev(node)
2908 except error.RevlogError:
2916 except error.RevlogError:
2909 pass
2917 pass
2910
2918
2911 def resolvenodes(nodes, count=1):
2919 def resolvenodes(nodes, count=1):
2912 index = parse_index_v1(data, inline)[0]
2920 index = parse_index_v1(data, inline)[0]
2913 rev = getattr(index, 'rev', None)
2921 rev = getattr(index, 'rev', None)
2914 if rev is None:
2922 if rev is None:
2915 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2923 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2916 # This only works for the C code.
2924 # This only works for the C code.
2917 if nodemap is None:
2925 if nodemap is None:
2918 return
2926 return
2919 rev = nodemap.__getitem__
2927 rev = nodemap.__getitem__
2920
2928
2921 for i in range(count):
2929 for i in range(count):
2922 for node in nodes:
2930 for node in nodes:
2923 try:
2931 try:
2924 rev(node)
2932 rev(node)
2925 except error.RevlogError:
2933 except error.RevlogError:
2926 pass
2934 pass
2927
2935
2928 benches = [
2936 benches = [
2929 (constructor, b'revlog constructor'),
2937 (constructor, b'revlog constructor'),
2930 (read, b'read'),
2938 (read, b'read'),
2931 (parseindex, b'create index object'),
2939 (parseindex, b'create index object'),
2932 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2940 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2933 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2941 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2934 (lambda: resolvenode(node0), b'look up node at rev 0'),
2942 (lambda: resolvenode(node0), b'look up node at rev 0'),
2935 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2943 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2936 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2944 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2937 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2945 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2938 (lambda: resolvenode(node100), b'look up node at tip'),
2946 (lambda: resolvenode(node100), b'look up node at tip'),
2939 # 2x variation is to measure caching impact.
2947 # 2x variation is to measure caching impact.
2940 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2948 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2941 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2949 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2942 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2950 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2943 (
2951 (
2944 lambda: resolvenodes(allnodesrev, 2),
2952 lambda: resolvenodes(allnodesrev, 2),
2945 b'look up all nodes 2x (reverse)',
2953 b'look up all nodes 2x (reverse)',
2946 ),
2954 ),
2947 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2955 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2948 (
2956 (
2949 lambda: getentries(allrevs, 2),
2957 lambda: getentries(allrevs, 2),
2950 b'retrieve all index entries 2x (forward)',
2958 b'retrieve all index entries 2x (forward)',
2951 ),
2959 ),
2952 (
2960 (
2953 lambda: getentries(allrevsrev),
2961 lambda: getentries(allrevsrev),
2954 b'retrieve all index entries (reverse)',
2962 b'retrieve all index entries (reverse)',
2955 ),
2963 ),
2956 (
2964 (
2957 lambda: getentries(allrevsrev, 2),
2965 lambda: getentries(allrevsrev, 2),
2958 b'retrieve all index entries 2x (reverse)',
2966 b'retrieve all index entries 2x (reverse)',
2959 ),
2967 ),
2960 ]
2968 ]
2961
2969
2962 for fn, title in benches:
2970 for fn, title in benches:
2963 timer, fm = gettimer(ui, opts)
2971 timer, fm = gettimer(ui, opts)
2964 timer(fn, title=title)
2972 timer(fn, title=title)
2965 fm.end()
2973 fm.end()
2966
2974
2967
2975
2968 @command(
2976 @command(
2969 b'perf::revlogrevisions|perfrevlogrevisions',
2977 b'perf::revlogrevisions|perfrevlogrevisions',
2970 revlogopts
2978 revlogopts
2971 + formatteropts
2979 + formatteropts
2972 + [
2980 + [
2973 (b'd', b'dist', 100, b'distance between the revisions'),
2981 (b'd', b'dist', 100, b'distance between the revisions'),
2974 (b's', b'startrev', 0, b'revision to start reading at'),
2982 (b's', b'startrev', 0, b'revision to start reading at'),
2975 (b'', b'reverse', False, b'read in reverse'),
2983 (b'', b'reverse', False, b'read in reverse'),
2976 ],
2984 ],
2977 b'-c|-m|FILE',
2985 b'-c|-m|FILE',
2978 )
2986 )
2979 def perfrevlogrevisions(
2987 def perfrevlogrevisions(
2980 ui, repo, file_=None, startrev=0, reverse=False, **opts
2988 ui, repo, file_=None, startrev=0, reverse=False, **opts
2981 ):
2989 ):
2982 """Benchmark reading a series of revisions from a revlog.
2990 """Benchmark reading a series of revisions from a revlog.
2983
2991
2984 By default, we read every ``-d/--dist`` revision from 0 to tip of
2992 By default, we read every ``-d/--dist`` revision from 0 to tip of
2985 the specified revlog.
2993 the specified revlog.
2986
2994
2987 The start revision can be defined via ``-s/--startrev``.
2995 The start revision can be defined via ``-s/--startrev``.
2988 """
2996 """
2989 opts = _byteskwargs(opts)
2997 opts = _byteskwargs(opts)
2990
2998
2991 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2999 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2992 rllen = getlen(ui)(rl)
3000 rllen = getlen(ui)(rl)
2993
3001
2994 if startrev < 0:
3002 if startrev < 0:
2995 startrev = rllen + startrev
3003 startrev = rllen + startrev
2996
3004
2997 def d():
3005 def d():
2998 rl.clearcaches()
3006 rl.clearcaches()
2999
3007
3000 beginrev = startrev
3008 beginrev = startrev
3001 endrev = rllen
3009 endrev = rllen
3002 dist = opts[b'dist']
3010 dist = opts[b'dist']
3003
3011
3004 if reverse:
3012 if reverse:
3005 beginrev, endrev = endrev - 1, beginrev - 1
3013 beginrev, endrev = endrev - 1, beginrev - 1
3006 dist = -1 * dist
3014 dist = -1 * dist
3007
3015
3008 for x in _xrange(beginrev, endrev, dist):
3016 for x in _xrange(beginrev, endrev, dist):
3009 # Old revisions don't support passing int.
3009 # Old Mercurial versions don't support passing an int here, so use the node.
3017 # Old Mercurial versions don't support passing an int here, so use the node.
3018 n = rl.node(x)
3011 rl.revision(n)
3019 rl.revision(n)
3012
3020
3013 timer, fm = gettimer(ui, opts)
3021 timer, fm = gettimer(ui, opts)
3014 timer(d)
3022 timer(d)
3015 fm.end()
3023 fm.end()
3016
3024
3017
3025
3018 @command(
3026 @command(
3019 b'perf::revlogwrite|perfrevlogwrite',
3027 b'perf::revlogwrite|perfrevlogwrite',
3020 revlogopts
3028 revlogopts
3021 + formatteropts
3029 + formatteropts
3022 + [
3030 + [
3023 (b's', b'startrev', 1000, b'revision to start writing at'),
3031 (b's', b'startrev', 1000, b'revision to start writing at'),
3024 (b'', b'stoprev', -1, b'last revision to write'),
3032 (b'', b'stoprev', -1, b'last revision to write'),
3025 (b'', b'count', 3, b'number of passes to perform'),
3033 (b'', b'count', 3, b'number of passes to perform'),
3026 (b'', b'details', False, b'print timing for every revision tested'),
3034 (b'', b'details', False, b'print timing for every revision tested'),
3027 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
3035 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
3028 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3036 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3029 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3037 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3030 ],
3038 ],
3031 b'-c|-m|FILE',
3039 b'-c|-m|FILE',
3032 )
3040 )
3033 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3041 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3034 """Benchmark writing a series of revisions to a revlog.
3042 """Benchmark writing a series of revisions to a revlog.
3035
3043
3036 Possible source values are:
3044 Possible source values are:
3037 * `full`: add from a full text (default).
3045 * `full`: add from a full text (default).
3038 * `parent-1`: add from a delta to the first parent
3046 * `parent-1`: add from a delta to the first parent
3039 * `parent-2`: add from a delta to the second parent if it exists
3047 * `parent-2`: add from a delta to the second parent if it exists
3040 (use a delta from the first parent otherwise)
3048 (use a delta from the first parent otherwise)
3041 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3049 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3042 * `storage`: add from the existing precomputed deltas
3050 * `storage`: add from the existing precomputed deltas
3043
3051
3044 Note: This command measures performance in a custom way. As a
3052 Note: This command measures performance in a custom way. As a
3045 result some of the global configuration of the 'perf' command does not
3053 result some of the global configuration of the 'perf' command does not
3046 apply to it:
3054 apply to it:
3047
3055
3048 * ``pre-run``: disabled
3056 * ``pre-run``: disabled
3049
3057
3050 * ``profile-benchmark``: disabled
3058 * ``profile-benchmark``: disabled
3051
3059
3052 * ``run-limits``: disabled, use --count instead
3060 * ``run-limits``: disabled, use --count instead
3053 """
3061 """
3054 opts = _byteskwargs(opts)
3062 opts = _byteskwargs(opts)
3055
3063
3056 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3064 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3057 rllen = getlen(ui)(rl)
3065 rllen = getlen(ui)(rl)
3058 if startrev < 0:
3066 if startrev < 0:
3059 startrev = rllen + startrev
3067 startrev = rllen + startrev
3060 if stoprev < 0:
3068 if stoprev < 0:
3061 stoprev = rllen + stoprev
3069 stoprev = rllen + stoprev
3062
3070
3063 lazydeltabase = opts['lazydeltabase']
3071 lazydeltabase = opts['lazydeltabase']
3064 source = opts['source']
3072 source = opts['source']
3065 clearcaches = opts['clear_caches']
3073 clearcaches = opts['clear_caches']
3066 validsource = (
3074 validsource = (
3067 b'full',
3075 b'full',
3068 b'parent-1',
3076 b'parent-1',
3069 b'parent-2',
3077 b'parent-2',
3070 b'parent-smallest',
3078 b'parent-smallest',
3071 b'storage',
3079 b'storage',
3072 )
3080 )
3073 if source not in validsource:
3081 if source not in validsource:
3074 raise error.Abort('invalid source type: %s' % source)
3082 raise error.Abort('invalid source type: %s' % source)
3075
3083
3076 ### actually gather results
3084 ### actually gather results
3077 count = opts['count']
3085 count = opts['count']
3078 if count <= 0:
3086 if count <= 0:
3079 raise error.Abort('invalid run count: %d' % count)
3087 raise error.Abort('invalid run count: %d' % count)
3080 allresults = []
3088 allresults = []
3081 for c in range(count):
3089 for c in range(count):
3082 timing = _timeonewrite(
3090 timing = _timeonewrite(
3083 ui,
3091 ui,
3084 rl,
3092 rl,
3085 source,
3093 source,
3086 startrev,
3094 startrev,
3087 stoprev,
3095 stoprev,
3088 c + 1,
3096 c + 1,
3089 lazydeltabase=lazydeltabase,
3097 lazydeltabase=lazydeltabase,
3090 clearcaches=clearcaches,
3098 clearcaches=clearcaches,
3091 )
3099 )
3092 allresults.append(timing)
3100 allresults.append(timing)
3093
3101
3094 ### consolidate the results in a single list
3102 ### consolidate the results in a single list
3095 results = []
3103 results = []
3096 for idx, (rev, t) in enumerate(allresults[0]):
3104 for idx, (rev, t) in enumerate(allresults[0]):
3097 ts = [t]
3105 ts = [t]
3098 for other in allresults[1:]:
3106 for other in allresults[1:]:
3099 orev, ot = other[idx]
3107 orev, ot = other[idx]
3100 assert orev == rev
3108 assert orev == rev
3101 ts.append(ot)
3109 ts.append(ot)
3102 results.append((rev, ts))
3110 results.append((rev, ts))
3103 resultcount = len(results)
3111 resultcount = len(results)
3104
3112
3105 ### Compute and display relevant statistics
3113 ### Compute and display relevant statistics
3106
3114
3107 # get a formatter
3115 # get a formatter
3108 fm = ui.formatter(b'perf', opts)
3116 fm = ui.formatter(b'perf', opts)
3109 displayall = ui.configbool(b"perf", b"all-timing", False)
3117 displayall = ui.configbool(b"perf", b"all-timing", False)
3110
3118
3111 # print individual details if requested
3119 # print individual details if requested
3112 if opts['details']:
3120 if opts['details']:
3113 for idx, item in enumerate(results, 1):
3121 for idx, item in enumerate(results, 1):
3114 rev, data = item
3122 rev, data = item
3115 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3123 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3116 formatone(fm, data, title=title, displayall=displayall)
3124 formatone(fm, data, title=title, displayall=displayall)
3117
3125
3118 # sorts results by median time
3126 # sorts results by median time
3119 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3127 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
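# For example, with --count 3 each x[1] holds three timing samples; the key
# lambda picks the middle one after sorting, i.e. the per-revision median.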
3120 # list of (name, index) to display
3128 # list of (name, index) to display
3121 relevants = [
3129 relevants = [
3122 ("min", 0),
3130 ("min", 0),
3123 ("10%", resultcount * 10 // 100),
3131 ("10%", resultcount * 10 // 100),
3124 ("25%", resultcount * 25 // 100),
3132 ("25%", resultcount * 25 // 100),
3125 ("50%", resultcount * 70 // 100),
3133 ("50%", resultcount * 70 // 100),
3126 ("75%", resultcount * 75 // 100),
3134 ("75%", resultcount * 75 // 100),
3127 ("90%", resultcount * 90 // 100),
3135 ("90%", resultcount * 90 // 100),
3128 ("95%", resultcount * 95 // 100),
3136 ("95%", resultcount * 95 // 100),
3129 ("99%", resultcount * 99 // 100),
3137 ("99%", resultcount * 99 // 100),
3130 ("99.9%", resultcount * 999 // 1000),
3138 ("99.9%", resultcount * 999 // 1000),
3131 ("99.99%", resultcount * 9999 // 10000),
3139 ("99.99%", resultcount * 9999 // 10000),
3132 ("99.999%", resultcount * 99999 // 100000),
3140 ("99.999%", resultcount * 99999 // 100000),
3133 ("max", -1),
3141 ("max", -1),
3134 ]
3142 ]
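# The percentile entries above index into the median-sorted `results` list,
# e.g. with 200 measured revisions the "90%" row reports results[180]
# (200 * 90 // 100); "max" simply uses the last (slowest) entry.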
3135 if not ui.quiet:
3143 if not ui.quiet:
3136 for name, idx in relevants:
3144 for name, idx in relevants:
3137 data = results[idx]
3145 data = results[idx]
3138 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3146 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3139 formatone(fm, data[1], title=title, displayall=displayall)
3147 formatone(fm, data[1], title=title, displayall=displayall)
3140
3148
3141 # XXX summing that many floats will not be very precise, we ignore this fact
3149 # XXX summing that many floats will not be very precise, we ignore this fact
3142 # for now
3150 # for now
3143 totaltime = []
3151 totaltime = []
3144 for item in allresults:
3152 for item in allresults:
3145 totaltime.append(
3153 totaltime.append(
3146 (
3154 (
3147 sum(x[1][0] for x in item),
3155 sum(x[1][0] for x in item),
3148 sum(x[1][1] for x in item),
3156 sum(x[1][1] for x in item),
3149 sum(x[1][2] for x in item),
3157 sum(x[1][2] for x in item),
3150 )
3158 )
3151 )
3159 )
3152 formatone(
3160 formatone(
3153 fm,
3161 fm,
3154 totaltime,
3162 totaltime,
3155 title="total time (%d revs)" % resultcount,
3163 title="total time (%d revs)" % resultcount,
3156 displayall=displayall,
3164 displayall=displayall,
3157 )
3165 )
3158 fm.end()
3166 fm.end()
3159
3167
3160
3168
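# Transaction stand-in: addrawrevision() only needs something with a callable
# add() method here, and making it a no-op keeps journal bookkeeping out of
# the measured write time.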
3161 class _faketr:
3169 class _faketr:
3162 def add(s, x, y, z=None):
3170 def add(s, x, y, z=None):
3163 return None
3171 return None
3164
3172
3165
3173
3166 def _timeonewrite(
3174 def _timeonewrite(
3167 ui,
3175 ui,
3168 orig,
3176 orig,
3169 source,
3177 source,
3170 startrev,
3178 startrev,
3171 stoprev,
3179 stoprev,
3172 runidx=None,
3180 runidx=None,
3173 lazydeltabase=True,
3181 lazydeltabase=True,
3174 clearcaches=True,
3182 clearcaches=True,
3175 ):
3183 ):
3176 timings = []
3184 timings = []
3177 tr = _faketr()
3185 tr = _faketr()
3178 with _temprevlog(ui, orig, startrev) as dest:
3186 with _temprevlog(ui, orig, startrev) as dest:
3179 dest._lazydeltabase = lazydeltabase
3187 dest._lazydeltabase = lazydeltabase
3180 revs = list(orig.revs(startrev, stoprev))
3188 revs = list(orig.revs(startrev, stoprev))
3181 total = len(revs)
3189 total = len(revs)
3182 topic = 'adding'
3190 topic = 'adding'
3183 if runidx is not None:
3191 if runidx is not None:
3184 topic += ' (run #%d)' % runidx
3192 topic += ' (run #%d)' % runidx
3185 # Support both old and new progress API
3193 # Support both old and new progress API
3186 if util.safehasattr(ui, 'makeprogress'):
3194 if util.safehasattr(ui, 'makeprogress'):
3187 progress = ui.makeprogress(topic, unit='revs', total=total)
3195 progress = ui.makeprogress(topic, unit='revs', total=total)
3188
3196
3189 def updateprogress(pos):
3197 def updateprogress(pos):
3190 progress.update(pos)
3198 progress.update(pos)
3191
3199
3192 def completeprogress():
3200 def completeprogress():
3193 progress.complete()
3201 progress.complete()
3194
3202
3195 else:
3203 else:
3196
3204
3197 def updateprogress(pos):
3205 def updateprogress(pos):
3198 ui.progress(topic, pos, unit='revs', total=total)
3206 ui.progress(topic, pos, unit='revs', total=total)
3199
3207
3200 def completeprogress():
3208 def completeprogress():
3201 ui.progress(topic, None, unit='revs', total=total)
3209 ui.progress(topic, None, unit='revs', total=total)
3202
3210
3203 for idx, rev in enumerate(revs):
3211 for idx, rev in enumerate(revs):
3204 updateprogress(idx)
3212 updateprogress(idx)
3205 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3213 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3206 if clearcaches:
3214 if clearcaches:
3207 dest.index.clearcaches()
3215 dest.index.clearcaches()
3208 dest.clearcaches()
3216 dest.clearcaches()
3209 with timeone() as r:
3217 with timeone() as r:
3210 dest.addrawrevision(*addargs, **addkwargs)
3218 dest.addrawrevision(*addargs, **addkwargs)
3211 timings.append((rev, r[0]))
3219 timings.append((rev, r[0]))
3212 updateprogress(total)
3220 updateprogress(total)
3213 completeprogress()
3221 completeprogress()
3214 return timings
3222 return timings
3215
3223
3216
3224
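# Build the positional/keyword arguments passed to dest.addrawrevision():
# either a full text, or a (baserev, delta) cachedelta chosen according to
# the requested `source`.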
3217 def _getrevisionseed(orig, rev, tr, source):
3225 def _getrevisionseed(orig, rev, tr, source):
3218 from mercurial.node import nullid
3226 from mercurial.node import nullid
3219
3227
3220 linkrev = orig.linkrev(rev)
3228 linkrev = orig.linkrev(rev)
3221 node = orig.node(rev)
3229 node = orig.node(rev)
3222 p1, p2 = orig.parents(node)
3230 p1, p2 = orig.parents(node)
3223 flags = orig.flags(rev)
3231 flags = orig.flags(rev)
3224 cachedelta = None
3232 cachedelta = None
3225 text = None
3233 text = None
3226
3234
3227 if source == b'full':
3235 if source == b'full':
3228 text = orig.revision(rev)
3236 text = orig.revision(rev)
3229 elif source == b'parent-1':
3237 elif source == b'parent-1':
3230 baserev = orig.rev(p1)
3238 baserev = orig.rev(p1)
3231 cachedelta = (baserev, orig.revdiff(p1, rev))
3239 cachedelta = (baserev, orig.revdiff(p1, rev))
3232 elif source == b'parent-2':
3240 elif source == b'parent-2':
3233 parent = p2
3241 parent = p2
3234 if p2 == nullid:
3242 if p2 == nullid:
3235 parent = p1
3243 parent = p1
3236 baserev = orig.rev(parent)
3244 baserev = orig.rev(parent)
3237 cachedelta = (baserev, orig.revdiff(parent, rev))
3245 cachedelta = (baserev, orig.revdiff(parent, rev))
3238 elif source == b'parent-smallest':
3246 elif source == b'parent-smallest':
3239 p1diff = orig.revdiff(p1, rev)
3247 p1diff = orig.revdiff(p1, rev)
3240 parent = p1
3248 parent = p1
3241 diff = p1diff
3249 diff = p1diff
3242 if p2 != nullid:
3250 if p2 != nullid:
3243 p2diff = orig.revdiff(p2, rev)
3251 p2diff = orig.revdiff(p2, rev)
3244 if len(p1diff) > len(p2diff):
3252 if len(p1diff) > len(p2diff):
3245 parent = p2
3253 parent = p2
3246 diff = p2diff
3254 diff = p2diff
3247 baserev = orig.rev(parent)
3255 baserev = orig.rev(parent)
3248 cachedelta = (baserev, diff)
3256 cachedelta = (baserev, diff)
3249 elif source == b'storage':
3257 elif source == b'storage':
3250 baserev = orig.deltaparent(rev)
3258 baserev = orig.deltaparent(rev)
3251 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3259 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3252
3260
3253 return (
3261 return (
3254 (text, tr, linkrev, p1, p2),
3262 (text, tr, linkrev, p1, p2),
3255 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3263 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3256 )
3264 )
3257
3265
3258
3266
3259 @contextlib.contextmanager
3267 @contextlib.contextmanager
3260 def _temprevlog(ui, orig, truncaterev):
3268 def _temprevlog(ui, orig, truncaterev):
3261 from mercurial import vfs as vfsmod
3269 from mercurial import vfs as vfsmod
3262
3270
3263 if orig._inline:
3271 if orig._inline:
3264 raise error.Abort('not supporting inline revlog (yet)')
3272 raise error.Abort('not supporting inline revlog (yet)')
3265 revlogkwargs = {}
3273 revlogkwargs = {}
3266 k = 'upperboundcomp'
3274 k = 'upperboundcomp'
3267 if util.safehasattr(orig, k):
3275 if util.safehasattr(orig, k):
3268 revlogkwargs[k] = getattr(orig, k)
3276 revlogkwargs[k] = getattr(orig, k)
3269
3277
3270 indexfile = getattr(orig, '_indexfile', None)
3278 indexfile = getattr(orig, '_indexfile', None)
3271 if indexfile is None:
3279 if indexfile is None:
3272 # compatibility with <= hg-5.8
3280 # compatibility with <= hg-5.8
3273 indexfile = getattr(orig, 'indexfile')
3281 indexfile = getattr(orig, 'indexfile')
3274 origindexpath = orig.opener.join(indexfile)
3282 origindexpath = orig.opener.join(indexfile)
3275
3283
3276 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3284 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3277 origdatapath = orig.opener.join(datafile)
3285 origdatapath = orig.opener.join(datafile)
3278 radix = b'revlog'
3286 radix = b'revlog'
3279 indexname = b'revlog.i'
3287 indexname = b'revlog.i'
3280 dataname = b'revlog.d'
3288 dataname = b'revlog.d'
3281
3289
3282 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3290 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3283 try:
3291 try:
3284 # copy the data file in a temporary directory
3292 # copy the data file in a temporary directory
3285 ui.debug('copying data in %s\n' % tmpdir)
3293 ui.debug('copying data in %s\n' % tmpdir)
3286 destindexpath = os.path.join(tmpdir, 'revlog.i')
3294 destindexpath = os.path.join(tmpdir, 'revlog.i')
3287 destdatapath = os.path.join(tmpdir, 'revlog.d')
3295 destdatapath = os.path.join(tmpdir, 'revlog.d')
3288 shutil.copyfile(origindexpath, destindexpath)
3296 shutil.copyfile(origindexpath, destindexpath)
3289 shutil.copyfile(origdatapath, destdatapath)
3297 shutil.copyfile(origdatapath, destdatapath)
3290
3298
3291 # remove the data we want to add again
3299 # remove the data we want to add again
3292 ui.debug('truncating data to be rewritten\n')
3300 ui.debug('truncating data to be rewritten\n')
3293 with open(destindexpath, 'ab') as index:
3301 with open(destindexpath, 'ab') as index:
3294 index.seek(0)
3302 index.seek(0)
3295 index.truncate(truncaterev * orig._io.size)
3303 index.truncate(truncaterev * orig._io.size)
3296 with open(destdatapath, 'ab') as data:
3304 with open(destdatapath, 'ab') as data:
3297 data.seek(0)
3305 data.seek(0)
3298 data.truncate(orig.start(truncaterev))
3306 data.truncate(orig.start(truncaterev))
3299
3307
3300 # instantiate a new revlog from the temporary copy
3308 # instantiate a new revlog from the temporary copy
3301 ui.debug('instantiating revlog from the truncated copy\n')
3309 ui.debug('instantiating revlog from the truncated copy\n')
3302 vfs = vfsmod.vfs(tmpdir)
3310 vfs = vfsmod.vfs(tmpdir)
3303 vfs.options = getattr(orig.opener, 'options', None)
3311 vfs.options = getattr(orig.opener, 'options', None)
3304
3312
3305 try:
3313 try:
3306 dest = revlog(vfs, radix=radix, **revlogkwargs)
3314 dest = revlog(vfs, radix=radix, **revlogkwargs)
3307 except TypeError:
3315 except TypeError:
3308 dest = revlog(
3316 dest = revlog(
3309 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3317 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3310 )
3318 )
3311 if dest._inline:
3319 if dest._inline:
3312 raise error.Abort('not supporting inline revlog (yet)')
3320 raise error.Abort('not supporting inline revlog (yet)')
3313 # make sure internals are initialized
3321 # make sure internals are initialized
3314 dest.revision(len(dest) - 1)
3322 dest.revision(len(dest) - 1)
3315 yield dest
3323 yield dest
3316 del dest, vfs
3324 del dest, vfs
3317 finally:
3325 finally:
3318 shutil.rmtree(tmpdir, True)
3326 shutil.rmtree(tmpdir, True)
3319
3327
3320
3328
3321 @command(
3329 @command(
3322 b'perf::revlogchunks|perfrevlogchunks',
3330 b'perf::revlogchunks|perfrevlogchunks',
3323 revlogopts
3331 revlogopts
3324 + formatteropts
3332 + formatteropts
3325 + [
3333 + [
3326 (b'e', b'engines', b'', b'compression engines to use'),
3334 (b'e', b'engines', b'', b'compression engines to use'),
3327 (b's', b'startrev', 0, b'revision to start at'),
3335 (b's', b'startrev', 0, b'revision to start at'),
3328 ],
3336 ],
3329 b'-c|-m|FILE',
3337 b'-c|-m|FILE',
3330 )
3338 )
3331 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3339 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3332 """Benchmark operations on revlog chunks.
3340 """Benchmark operations on revlog chunks.
3333
3341
3334 Logically, each revlog is a collection of fulltext revisions. However,
3342 Logically, each revlog is a collection of fulltext revisions. However,
3335 stored within each revlog are "chunks" of possibly compressed data. This
3343 stored within each revlog are "chunks" of possibly compressed data. This
3336 data needs to be read and decompressed or compressed and written.
3344 data needs to be read and decompressed or compressed and written.
3337
3345
3338 This command measures the time it takes to read+decompress and recompress
3346 This command measures the time it takes to read+decompress and recompress
3339 chunks in a revlog. It effectively isolates I/O and compression performance.
3347 chunks in a revlog. It effectively isolates I/O and compression performance.
3340 For measurements of higher-level operations like resolving revisions,
3348 For measurements of higher-level operations like resolving revisions,
3341 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3349 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3342 """
3350 """
3343 opts = _byteskwargs(opts)
3351 opts = _byteskwargs(opts)
3344
3352
3345 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3353 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3346
3354
3347 # _chunkraw was renamed to _getsegmentforrevs.
3355 # _chunkraw was renamed to _getsegmentforrevs.
3348 try:
3356 try:
3349 segmentforrevs = rl._getsegmentforrevs
3357 segmentforrevs = rl._getsegmentforrevs
3350 except AttributeError:
3358 except AttributeError:
3351 segmentforrevs = rl._chunkraw
3359 segmentforrevs = rl._chunkraw
3352
3360
3353 # Verify engines argument.
3361 # Verify engines argument.
3354 if engines:
3362 if engines:
3355 engines = {e.strip() for e in engines.split(b',')}
3363 engines = {e.strip() for e in engines.split(b',')}
3356 for engine in engines:
3364 for engine in engines:
3357 try:
3365 try:
3358 util.compressionengines[engine]
3366 util.compressionengines[engine]
3359 except KeyError:
3367 except KeyError:
3360 raise error.Abort(b'unknown compression engine: %s' % engine)
3368 raise error.Abort(b'unknown compression engine: %s' % engine)
3361 else:
3369 else:
3362 engines = []
3370 engines = []
3363 for e in util.compengines:
3371 for e in util.compengines:
3364 engine = util.compengines[e]
3372 engine = util.compengines[e]
3365 try:
3373 try:
3366 if engine.available():
3374 if engine.available():
3367 engine.revlogcompressor().compress(b'dummy')
3375 engine.revlogcompressor().compress(b'dummy')
3368 engines.append(e)
3376 engines.append(e)
3369 except NotImplementedError:
3377 except NotImplementedError:
3370 pass
3378 pass
3371
3379
3372 revs = list(rl.revs(startrev, len(rl) - 1))
3380 revs = list(rl.revs(startrev, len(rl) - 1))
3373
3381
3374 def rlfh(rl):
3382 def rlfh(rl):
3375 if rl._inline:
3383 if rl._inline:
3376 indexfile = getattr(rl, '_indexfile', None)
3384 indexfile = getattr(rl, '_indexfile', None)
3377 if indexfile is None:
3385 if indexfile is None:
3378 # compatibility with <= hg-5.8
3386 # compatibility with <= hg-5.8
3379 indexfile = getattr(rl, 'indexfile')
3387 indexfile = getattr(rl, 'indexfile')
3380 return getsvfs(repo)(indexfile)
3388 return getsvfs(repo)(indexfile)
3381 else:
3389 else:
3382 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3390 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3383 return getsvfs(repo)(datafile)
3391 return getsvfs(repo)(datafile)
3384
3392
3385 def doread():
3393 def doread():
3386 rl.clearcaches()
3394 rl.clearcaches()
3387 for rev in revs:
3395 for rev in revs:
3388 segmentforrevs(rev, rev)
3396 segmentforrevs(rev, rev)
3389
3397
3390 def doreadcachedfh():
3398 def doreadcachedfh():
3391 rl.clearcaches()
3399 rl.clearcaches()
3392 fh = rlfh(rl)
3400 fh = rlfh(rl)
3393 for rev in revs:
3401 for rev in revs:
3394 segmentforrevs(rev, rev, df=fh)
3402 segmentforrevs(rev, rev, df=fh)
3395
3403
3396 def doreadbatch():
3404 def doreadbatch():
3397 rl.clearcaches()
3405 rl.clearcaches()
3398 segmentforrevs(revs[0], revs[-1])
3406 segmentforrevs(revs[0], revs[-1])
3399
3407
3400 def doreadbatchcachedfh():
3408 def doreadbatchcachedfh():
3401 rl.clearcaches()
3409 rl.clearcaches()
3402 fh = rlfh(rl)
3410 fh = rlfh(rl)
3403 segmentforrevs(revs[0], revs[-1], df=fh)
3411 segmentforrevs(revs[0], revs[-1], df=fh)
3404
3412
3405 def dochunk():
3413 def dochunk():
3406 rl.clearcaches()
3414 rl.clearcaches()
3407 fh = rlfh(rl)
3415 fh = rlfh(rl)
3408 for rev in revs:
3416 for rev in revs:
3409 rl._chunk(rev, df=fh)
3417 rl._chunk(rev, df=fh)
3410
3418
3411 chunks = [None]
3419 chunks = [None]
3412
3420
3413 def dochunkbatch():
3421 def dochunkbatch():
3414 rl.clearcaches()
3422 rl.clearcaches()
3415 fh = rlfh(rl)
3423 fh = rlfh(rl)
3416 # Save chunks as a side-effect.
3424 # Save chunks as a side-effect.
3417 chunks[0] = rl._chunks(revs, df=fh)
3425 chunks[0] = rl._chunks(revs, df=fh)
3418
3426
3419 def docompress(compressor):
3427 def docompress(compressor):
3420 rl.clearcaches()
3428 rl.clearcaches()
3421
3429
3422 try:
3430 try:
3423 # Swap in the requested compression engine.
3431 # Swap in the requested compression engine.
3424 oldcompressor = rl._compressor
3432 oldcompressor = rl._compressor
3425 rl._compressor = compressor
3433 rl._compressor = compressor
3426 for chunk in chunks[0]:
3434 for chunk in chunks[0]:
3427 rl.compress(chunk)
3435 rl.compress(chunk)
3428 finally:
3436 finally:
3429 rl._compressor = oldcompressor
3437 rl._compressor = oldcompressor
3430
3438
3431 benches = [
3439 benches = [
3432 (lambda: doread(), b'read'),
3440 (lambda: doread(), b'read'),
3433 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3441 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3434 (lambda: doreadbatch(), b'read batch'),
3442 (lambda: doreadbatch(), b'read batch'),
3435 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3443 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3436 (lambda: dochunk(), b'chunk'),
3444 (lambda: dochunk(), b'chunk'),
3437 (lambda: dochunkbatch(), b'chunk batch'),
3445 (lambda: dochunkbatch(), b'chunk batch'),
3438 ]
3446 ]
3439
3447
3440 for engine in sorted(engines):
3448 for engine in sorted(engines):
3441 compressor = util.compengines[engine].revlogcompressor()
3449 compressor = util.compengines[engine].revlogcompressor()
3442 benches.append(
3450 benches.append(
3443 (
3451 (
3444 functools.partial(docompress, compressor),
3452 functools.partial(docompress, compressor),
3445 b'compress w/ %s' % engine,
3453 b'compress w/ %s' % engine,
3446 )
3454 )
3447 )
3455 )
3448
3456
3449 for fn, title in benches:
3457 for fn, title in benches:
3450 timer, fm = gettimer(ui, opts)
3458 timer, fm = gettimer(ui, opts)
3451 timer(fn, title=title)
3459 timer(fn, title=title)
3452 fm.end()
3460 fm.end()
3453
3461
3454
3462
3455 @command(
3463 @command(
3456 b'perf::revlogrevision|perfrevlogrevision',
3464 b'perf::revlogrevision|perfrevlogrevision',
3457 revlogopts
3465 revlogopts
3458 + formatteropts
3466 + formatteropts
3459 + [(b'', b'cache', False, b'use caches instead of clearing')],
3467 + [(b'', b'cache', False, b'use caches instead of clearing')],
3460 b'-c|-m|FILE REV',
3468 b'-c|-m|FILE REV',
3461 )
3469 )
3462 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3470 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3463 """Benchmark obtaining a revlog revision.
3471 """Benchmark obtaining a revlog revision.
3464
3472
3465 Obtaining a revlog revision consists of roughly the following steps:
3473 Obtaining a revlog revision consists of roughly the following steps:
3466
3474
3467 1. Compute the delta chain
3475 1. Compute the delta chain
3468 2. Slice the delta chain if applicable
3476 2. Slice the delta chain if applicable
3469 3. Obtain the raw chunks for that delta chain
3477 3. Obtain the raw chunks for that delta chain
3470 4. Decompress each raw chunk
3478 4. Decompress each raw chunk
3471 5. Apply binary patches to obtain fulltext
3479 5. Apply binary patches to obtain fulltext
3472 6. Verify hash of fulltext
3480 6. Verify hash of fulltext
3473
3481
3474 This command measures the time spent in each of these phases.
3482 This command measures the time spent in each of these phases.
3475 """
3483 """
3476 opts = _byteskwargs(opts)
3484 opts = _byteskwargs(opts)
3477
3485
3478 if opts.get(b'changelog') or opts.get(b'manifest'):
3486 if opts.get(b'changelog') or opts.get(b'manifest'):
3479 file_, rev = None, file_
3487 file_, rev = None, file_
3480 elif rev is None:
3488 elif rev is None:
3481 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3489 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3482
3490
3483 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3491 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3484
3492
3485 # _chunkraw was renamed to _getsegmentforrevs.
3493 # _chunkraw was renamed to _getsegmentforrevs.
3486 try:
3494 try:
3487 segmentforrevs = r._getsegmentforrevs
3495 segmentforrevs = r._getsegmentforrevs
3488 except AttributeError:
3496 except AttributeError:
3489 segmentforrevs = r._chunkraw
3497 segmentforrevs = r._chunkraw
3490
3498
3491 node = r.lookup(rev)
3499 node = r.lookup(rev)
3492 rev = r.rev(node)
3500 rev = r.rev(node)
3493
3501
3494 def getrawchunks(data, chain):
3502 def getrawchunks(data, chain):
3495 start = r.start
3503 start = r.start
3496 length = r.length
3504 length = r.length
3497 inline = r._inline
3505 inline = r._inline
3498 try:
3506 try:
3499 iosize = r.index.entry_size
3507 iosize = r.index.entry_size
3500 except AttributeError:
3508 except AttributeError:
3501 iosize = r._io.size
3509 iosize = r._io.size
3502 buffer = util.buffer
3510 buffer = util.buffer
3503
3511
3504 chunks = []
3512 chunks = []
3505 ladd = chunks.append
3513 ladd = chunks.append
3506 for idx, item in enumerate(chain):
3514 for idx, item in enumerate(chain):
3507 offset = start(item[0])
3515 offset = start(item[0])
3508 bits = data[idx]
3516 bits = data[idx]
3509 for rev in item:
3517 for rev in item:
3510 chunkstart = start(rev)
3518 chunkstart = start(rev)
3511 if inline:
3519 if inline:
3512 chunkstart += (rev + 1) * iosize
3520 chunkstart += (rev + 1) * iosize
3513 chunklength = length(rev)
3521 chunklength = length(rev)
3514 ladd(buffer(bits, chunkstart - offset, chunklength))
3522 ladd(buffer(bits, chunkstart - offset, chunklength))
3515
3523
3516 return chunks
3524 return chunks
3517
3525
3518 def dodeltachain(rev):
3526 def dodeltachain(rev):
3519 if not cache:
3527 if not cache:
3520 r.clearcaches()
3528 r.clearcaches()
3521 r._deltachain(rev)
3529 r._deltachain(rev)
3522
3530
3523 def doread(chain):
3531 def doread(chain):
3524 if not cache:
3532 if not cache:
3525 r.clearcaches()
3533 r.clearcaches()
3526 for item in slicedchain:
3534 for item in slicedchain:
3527 segmentforrevs(item[0], item[-1])
3535 segmentforrevs(item[0], item[-1])
3528
3536
3529 def doslice(r, chain, size):
3537 def doslice(r, chain, size):
3530 for s in slicechunk(r, chain, targetsize=size):
3538 for s in slicechunk(r, chain, targetsize=size):
3531 pass
3539 pass
3532
3540
3533 def dorawchunks(data, chain):
3541 def dorawchunks(data, chain):
3534 if not cache:
3542 if not cache:
3535 r.clearcaches()
3543 r.clearcaches()
3536 getrawchunks(data, chain)
3544 getrawchunks(data, chain)
3537
3545
3538 def dodecompress(chunks):
3546 def dodecompress(chunks):
3539 decomp = r.decompress
3547 decomp = r.decompress
3540 for chunk in chunks:
3548 for chunk in chunks:
3541 decomp(chunk)
3549 decomp(chunk)
3542
3550
3543 def dopatch(text, bins):
3551 def dopatch(text, bins):
3544 if not cache:
3552 if not cache:
3545 r.clearcaches()
3553 r.clearcaches()
3546 mdiff.patches(text, bins)
3554 mdiff.patches(text, bins)
3547
3555
3548 def dohash(text):
3556 def dohash(text):
3549 if not cache:
3557 if not cache:
3550 r.clearcaches()
3558 r.clearcaches()
3551 r.checkhash(text, node, rev=rev)
3559 r.checkhash(text, node, rev=rev)
3552
3560
3553 def dorevision():
3561 def dorevision():
3554 if not cache:
3562 if not cache:
3555 r.clearcaches()
3563 r.clearcaches()
3556 r.revision(node)
3564 r.revision(node)
3557
3565
3558 try:
3566 try:
3559 from mercurial.revlogutils.deltas import slicechunk
3567 from mercurial.revlogutils.deltas import slicechunk
3560 except ImportError:
3568 except ImportError:
3561 slicechunk = getattr(revlog, '_slicechunk', None)
3569 slicechunk = getattr(revlog, '_slicechunk', None)
3562
3570
3563 size = r.length(rev)
3571 size = r.length(rev)
3564 chain = r._deltachain(rev)[0]
3572 chain = r._deltachain(rev)[0]
3565 if not getattr(r, '_withsparseread', False):
3573 if not getattr(r, '_withsparseread', False):
3566 slicedchain = (chain,)
3574 slicedchain = (chain,)
3567 else:
3575 else:
3568 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3576 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3569 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3577 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3570 rawchunks = getrawchunks(data, slicedchain)
3578 rawchunks = getrawchunks(data, slicedchain)
3571 bins = r._chunks(chain)
3579 bins = r._chunks(chain)
3572 text = bytes(bins[0])
3580 text = bytes(bins[0])
3573 bins = bins[1:]
3581 bins = bins[1:]
3574 text = mdiff.patches(text, bins)
3582 text = mdiff.patches(text, bins)
3575
3583
3576 benches = [
3584 benches = [
3577 (lambda: dorevision(), b'full'),
3585 (lambda: dorevision(), b'full'),
3578 (lambda: dodeltachain(rev), b'deltachain'),
3586 (lambda: dodeltachain(rev), b'deltachain'),
3579 (lambda: doread(chain), b'read'),
3587 (lambda: doread(chain), b'read'),
3580 ]
3588 ]
3581
3589
3582 if getattr(r, '_withsparseread', False):
3590 if getattr(r, '_withsparseread', False):
3583 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3591 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3584 benches.append(slicing)
3592 benches.append(slicing)
3585
3593
3586 benches.extend(
3594 benches.extend(
3587 [
3595 [
3588 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3596 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3589 (lambda: dodecompress(rawchunks), b'decompress'),
3597 (lambda: dodecompress(rawchunks), b'decompress'),
3590 (lambda: dopatch(text, bins), b'patch'),
3598 (lambda: dopatch(text, bins), b'patch'),
3591 (lambda: dohash(text), b'hash'),
3599 (lambda: dohash(text), b'hash'),
3592 ]
3600 ]
3593 )
3601 )
3594
3602
3595 timer, fm = gettimer(ui, opts)
3603 timer, fm = gettimer(ui, opts)
3596 for fn, title in benches:
3604 for fn, title in benches:
3597 timer(fn, title=title)
3605 timer(fn, title=title)
3598 fm.end()
3606 fm.end()
3599
3607
3600
3608
3601 @command(
3609 @command(
3602 b'perf::revset|perfrevset',
3610 b'perf::revset|perfrevset',
3603 [
3611 [
3604 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3612 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3605 (b'', b'contexts', False, b'obtain changectx for each revision'),
3613 (b'', b'contexts', False, b'obtain changectx for each revision'),
3606 ]
3614 ]
3607 + formatteropts,
3615 + formatteropts,
3608 b"REVSET",
3616 b"REVSET",
3609 )
3617 )
3610 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3618 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3611 """benchmark the execution time of a revset
3619 """benchmark the execution time of a revset
3612
3620
3613 Use the --clear option if you need to evaluate the impact of building the
3621 Use the --clear option if you need to evaluate the impact of building the
3614 volatile revision set caches on revset execution. The volatile caches hold
3622 volatile revision set caches on revset execution. The volatile caches hold
3615 filtering and obsolescence related data."""
3623 filtering and obsolescence related data."""
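# A hypothetical invocation sketch (the revsets themselves are illustrative):
#
#   $ hg perfrevset --clear 'draft() and not obsolete()'
#   $ hg perfrevset --contexts '::tip'
#
# --contexts additionally builds a changectx for every matched revision.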
3616 opts = _byteskwargs(opts)
3624 opts = _byteskwargs(opts)
3617
3625
3618 timer, fm = gettimer(ui, opts)
3626 timer, fm = gettimer(ui, opts)
3619
3627
3620 def d():
3628 def d():
3621 if clear:
3629 if clear:
3622 repo.invalidatevolatilesets()
3630 repo.invalidatevolatilesets()
3623 if contexts:
3631 if contexts:
3624 for ctx in repo.set(expr):
3632 for ctx in repo.set(expr):
3625 pass
3633 pass
3626 else:
3634 else:
3627 for r in repo.revs(expr):
3635 for r in repo.revs(expr):
3628 pass
3636 pass
3629
3637
3630 timer(d)
3638 timer(d)
3631 fm.end()
3639 fm.end()
3632
3640
3633
3641
3634 @command(
3642 @command(
3635 b'perf::volatilesets|perfvolatilesets',
3643 b'perf::volatilesets|perfvolatilesets',
3636 [
3644 [
3637 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3645 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3638 ]
3646 ]
3639 + formatteropts,
3647 + formatteropts,
3640 )
3648 )
3641 def perfvolatilesets(ui, repo, *names, **opts):
3649 def perfvolatilesets(ui, repo, *names, **opts):
3642 """benchmark the computation of various volatile set
3650 """benchmark the computation of various volatile set
3643
3651
3644 Volatile set computes element related to filtering and obsolescence."""
3652 Volatile set computes element related to filtering and obsolescence."""
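# A hypothetical invocation sketch (the set name is illustrative; without
# arguments every known volatile set and repoview filter is timed):
#
#   $ hg perfvolatilesets obsolete --clear-obsstore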
3645 opts = _byteskwargs(opts)
3653 opts = _byteskwargs(opts)
3646 timer, fm = gettimer(ui, opts)
3654 timer, fm = gettimer(ui, opts)
3647 repo = repo.unfiltered()
3655 repo = repo.unfiltered()
3648
3656
3649 def getobs(name):
3657 def getobs(name):
3650 def d():
3658 def d():
3651 repo.invalidatevolatilesets()
3659 repo.invalidatevolatilesets()
3652 if opts[b'clear_obsstore']:
3660 if opts[b'clear_obsstore']:
3653 clearfilecache(repo, b'obsstore')
3661 clearfilecache(repo, b'obsstore')
3654 obsolete.getrevs(repo, name)
3662 obsolete.getrevs(repo, name)
3655
3663
3656 return d
3664 return d
3657
3665
3658 allobs = sorted(obsolete.cachefuncs)
3666 allobs = sorted(obsolete.cachefuncs)
3659 if names:
3667 if names:
3660 allobs = [n for n in allobs if n in names]
3668 allobs = [n for n in allobs if n in names]
3661
3669
3662 for name in allobs:
3670 for name in allobs:
3663 timer(getobs(name), title=name)
3671 timer(getobs(name), title=name)
3664
3672
3665 def getfiltered(name):
3673 def getfiltered(name):
3666 def d():
3674 def d():
3667 repo.invalidatevolatilesets()
3675 repo.invalidatevolatilesets()
3668 if opts[b'clear_obsstore']:
3676 if opts[b'clear_obsstore']:
3669 clearfilecache(repo, b'obsstore')
3677 clearfilecache(repo, b'obsstore')
3670 repoview.filterrevs(repo, name)
3678 repoview.filterrevs(repo, name)
3671
3679
3672 return d
3680 return d
3673
3681
3674 allfilter = sorted(repoview.filtertable)
3682 allfilter = sorted(repoview.filtertable)
3675 if names:
3683 if names:
3676 allfilter = [n for n in allfilter if n in names]
3684 allfilter = [n for n in allfilter if n in names]
3677
3685
3678 for name in allfilter:
3686 for name in allfilter:
3679 timer(getfiltered(name), title=name)
3687 timer(getfiltered(name), title=name)
3680 fm.end()
3688 fm.end()
3681
3689
3682
3690
3683 @command(
3691 @command(
3684 b'perf::branchmap|perfbranchmap',
3692 b'perf::branchmap|perfbranchmap',
3685 [
3693 [
3686 (b'f', b'full', False, b'Includes build time of subset'),
3694 (b'f', b'full', False, b'Includes build time of subset'),
3687 (
3695 (
3688 b'',
3696 b'',
3689 b'clear-revbranch',
3697 b'clear-revbranch',
3690 False,
3698 False,
3691 b'purge the revbranch cache between computation',
3699 b'purge the revbranch cache between computation',
3692 ),
3700 ),
3693 ]
3701 ]
3694 + formatteropts,
3702 + formatteropts,
3695 )
3703 )
3696 def perfbranchmap(ui, repo, *filternames, **opts):
3704 def perfbranchmap(ui, repo, *filternames, **opts):
3697 """benchmark the update of a branchmap
3705 """benchmark the update of a branchmap
3698
3706
3699 This benchmarks the full repo.branchmap() call with read and write disabled
3707 This benchmarks the full repo.branchmap() call with read and write disabled
3700 """
3708 """
3701 opts = _byteskwargs(opts)
3709 opts = _byteskwargs(opts)
3702 full = opts.get(b"full", False)
3710 full = opts.get(b"full", False)
3703 clear_revbranch = opts.get(b"clear_revbranch", False)
3711 clear_revbranch = opts.get(b"clear_revbranch", False)
3704 timer, fm = gettimer(ui, opts)
3712 timer, fm = gettimer(ui, opts)
3705
3713
3706 def getbranchmap(filtername):
3714 def getbranchmap(filtername):
3707 """generate a benchmark function for the filtername"""
3715 """generate a benchmark function for the filtername"""
3708 if filtername is None:
3716 if filtername is None:
3709 view = repo
3717 view = repo
3710 else:
3718 else:
3711 view = repo.filtered(filtername)
3719 view = repo.filtered(filtername)
3712 if util.safehasattr(view._branchcaches, '_per_filter'):
3720 if util.safehasattr(view._branchcaches, '_per_filter'):
3713 filtered = view._branchcaches._per_filter
3721 filtered = view._branchcaches._per_filter
3714 else:
3722 else:
3715 # older versions
3723 # older versions
3716 filtered = view._branchcaches
3724 filtered = view._branchcaches
3717
3725
3718 def d():
3726 def d():
3719 if clear_revbranch:
3727 if clear_revbranch:
3720 repo.revbranchcache()._clear()
3728 repo.revbranchcache()._clear()
3721 if full:
3729 if full:
3722 view._branchcaches.clear()
3730 view._branchcaches.clear()
3723 else:
3731 else:
3724 filtered.pop(filtername, None)
3732 filtered.pop(filtername, None)
3725 view.branchmap()
3733 view.branchmap()
3726
3734
3727 return d
3735 return d
3728
3736
3729 # add filters in order, from the smaller subsets to the bigger ones
3737 # add filters in order, from the smaller subsets to the bigger ones
3730 possiblefilters = set(repoview.filtertable)
3738 possiblefilters = set(repoview.filtertable)
3731 if filternames:
3739 if filternames:
3732 possiblefilters &= set(filternames)
3740 possiblefilters &= set(filternames)
3733 subsettable = getbranchmapsubsettable()
3741 subsettable = getbranchmapsubsettable()
3734 allfilters = []
3742 allfilters = []
3735 while possiblefilters:
3743 while possiblefilters:
3736 for name in possiblefilters:
3744 for name in possiblefilters:
3737 subset = subsettable.get(name)
3745 subset = subsettable.get(name)
3738 if subset not in possiblefilters:
3746 if subset not in possiblefilters:
3739 break
3747 break
3740 else:
3748 else:
3741 assert False, b'subset cycle %s!' % possiblefilters
3749 assert False, b'subset cycle %s!' % possiblefilters
3742 allfilters.append(name)
3750 allfilters.append(name)
3743 possiblefilters.remove(name)
3751 possiblefilters.remove(name)
3744
3752
3745 # warm the cache
3753 # warm the cache
3746 if not full:
3754 if not full:
3747 for name in allfilters:
3755 for name in allfilters:
3748 repo.filtered(name).branchmap()
3756 repo.filtered(name).branchmap()
3749 if not filternames or b'unfiltered' in filternames:
3757 if not filternames or b'unfiltered' in filternames:
3750 # add unfiltered
3758 # add unfiltered
3751 allfilters.append(None)
3759 allfilters.append(None)
3752
3760
3753 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3761 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3754 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3762 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3755 branchcacheread.set(classmethod(lambda *args: None))
3763 branchcacheread.set(classmethod(lambda *args: None))
3756 else:
3764 else:
3757 # older versions
3765 # older versions
3758 branchcacheread = safeattrsetter(branchmap, b'read')
3766 branchcacheread = safeattrsetter(branchmap, b'read')
3759 branchcacheread.set(lambda *args: None)
3767 branchcacheread.set(lambda *args: None)
3760 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3768 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3761 branchcachewrite.set(lambda *args: None)
3769 branchcachewrite.set(lambda *args: None)
3762 try:
3770 try:
3763 for name in allfilters:
3771 for name in allfilters:
3764 printname = name
3772 printname = name
3765 if name is None:
3773 if name is None:
3766 printname = b'unfiltered'
3774 printname = b'unfiltered'
3767 timer(getbranchmap(name), title=printname)
3775 timer(getbranchmap(name), title=printname)
3768 finally:
3776 finally:
3769 branchcacheread.restore()
3777 branchcacheread.restore()
3770 branchcachewrite.restore()
3778 branchcachewrite.restore()
3771 fm.end()
3779 fm.end()
3772
3780
3773
3781
3774 @command(
3782 @command(
3775 b'perf::branchmapupdate|perfbranchmapupdate',
3783 b'perf::branchmapupdate|perfbranchmapupdate',
3776 [
3784 [
3777 (b'', b'base', [], b'subset of revision to start from'),
3785 (b'', b'base', [], b'subset of revision to start from'),
3778 (b'', b'target', [], b'subset of revision to end with'),
3786 (b'', b'target', [], b'subset of revision to end with'),
3779 (b'', b'clear-caches', False, b'clear caches between each run'),
3787 (b'', b'clear-caches', False, b'clear caches between each run'),
3780 ]
3788 ]
3781 + formatteropts,
3789 + formatteropts,
3782 )
3790 )
3783 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3791 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3784 """benchmark branchmap update from for <base> revs to <target> revs
3792 """benchmark branchmap update from for <base> revs to <target> revs
3785
3793
3786 If `--clear-caches` is passed, the following items will be reset before
3794 If `--clear-caches` is passed, the following items will be reset before
3787 each update:
3795 each update:
3788 * the changelog instance and associated indexes
3796 * the changelog instance and associated indexes
3789 * the rev-branch-cache instance
3797 * the rev-branch-cache instance
3790
3798
3791 Examples:
3799 Examples:
3792
3800
3793 # update for the one last revision
3801 # update for the one last revision
3794 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3802 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3795
3803
3796 # update for a change coming with a new branch
3804 # update for a change coming with a new branch
3797 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3805 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3798 """
3806 """
3799 from mercurial import branchmap
3807 from mercurial import branchmap
3800 from mercurial import repoview
3808 from mercurial import repoview
3801
3809
3802 opts = _byteskwargs(opts)
3810 opts = _byteskwargs(opts)
3803 timer, fm = gettimer(ui, opts)
3811 timer, fm = gettimer(ui, opts)
3804 clearcaches = opts[b'clear_caches']
3812 clearcaches = opts[b'clear_caches']
3805 unfi = repo.unfiltered()
3813 unfi = repo.unfiltered()
3806 x = [None] # used to pass data between closures
3814 x = [None] # used to pass data between closures
3807
3815
3808 # we use a `list` here to avoid possible side effect from smartset
3816 # we use a `list` here to avoid possible side effect from smartset
3809 baserevs = list(scmutil.revrange(repo, base))
3817 baserevs = list(scmutil.revrange(repo, base))
3810 targetrevs = list(scmutil.revrange(repo, target))
3818 targetrevs = list(scmutil.revrange(repo, target))
3811 if not baserevs:
3819 if not baserevs:
3812 raise error.Abort(b'no revisions selected for --base')
3820 raise error.Abort(b'no revisions selected for --base')
3813 if not targetrevs:
3821 if not targetrevs:
3814 raise error.Abort(b'no revisions selected for --target')
3822 raise error.Abort(b'no revisions selected for --target')
3815
3823
3816 # make sure the target branchmap also contains the one in the base
3824 # make sure the target branchmap also contains the one in the base
3817 targetrevs = list(set(baserevs) | set(targetrevs))
3825 targetrevs = list(set(baserevs) | set(targetrevs))
3818 targetrevs.sort()
3826 targetrevs.sort()
3819
3827
3820 cl = repo.changelog
3828 cl = repo.changelog
3821 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3829 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3822 allbaserevs.sort()
3830 allbaserevs.sort()
3823 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3831 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3824
3832
3825 newrevs = list(alltargetrevs.difference(allbaserevs))
3833 newrevs = list(alltargetrevs.difference(allbaserevs))
3826 newrevs.sort()
3834 newrevs.sort()
3827
3835
3828 allrevs = frozenset(unfi.changelog.revs())
3836 allrevs = frozenset(unfi.changelog.revs())
3829 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3837 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3830 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3838 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3831
3839
3832 def basefilter(repo, visibilityexceptions=None):
3840 def basefilter(repo, visibilityexceptions=None):
3833 return basefilterrevs
3841 return basefilterrevs
3834
3842
3835 def targetfilter(repo, visibilityexceptions=None):
3843 def targetfilter(repo, visibilityexceptions=None):
3836 return targetfilterrevs
3844 return targetfilterrevs
3837
3845
3838 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3846 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3839 ui.status(msg % (len(allbaserevs), len(newrevs)))
3847 ui.status(msg % (len(allbaserevs), len(newrevs)))
3840 if targetfilterrevs:
3848 if targetfilterrevs:
3841 msg = b'(%d revisions still filtered)\n'
3849 msg = b'(%d revisions still filtered)\n'
3842 ui.status(msg % len(targetfilterrevs))
3850 ui.status(msg % len(targetfilterrevs))
3843
3851
3844 try:
3852 try:
3845 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3853 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3846 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3854 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3847
3855
3848 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3856 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3849 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3857 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3850
3858
3851 # try to find an existing branchmap to reuse
3859 # try to find an existing branchmap to reuse
3852 subsettable = getbranchmapsubsettable()
3860 subsettable = getbranchmapsubsettable()
3853 candidatefilter = subsettable.get(None)
3861 candidatefilter = subsettable.get(None)
3854 while candidatefilter is not None:
3862 while candidatefilter is not None:
3855 candidatebm = repo.filtered(candidatefilter).branchmap()
3863 candidatebm = repo.filtered(candidatefilter).branchmap()
3856 if candidatebm.validfor(baserepo):
3864 if candidatebm.validfor(baserepo):
3857 filtered = repoview.filterrevs(repo, candidatefilter)
3865 filtered = repoview.filterrevs(repo, candidatefilter)
3858 missing = [r for r in allbaserevs if r in filtered]
3866 missing = [r for r in allbaserevs if r in filtered]
3859 base = candidatebm.copy()
3867 base = candidatebm.copy()
3860 base.update(baserepo, missing)
3868 base.update(baserepo, missing)
3861 break
3869 break
3862 candidatefilter = subsettable.get(candidatefilter)
3870 candidatefilter = subsettable.get(candidatefilter)
3863 else:
3871 else:
3864 # no suitable subset was found
3872 # no suitable subset was found
3865 base = branchmap.branchcache()
3873 base = branchmap.branchcache()
3866 base.update(baserepo, allbaserevs)
3874 base.update(baserepo, allbaserevs)
3867
3875
3868 def setup():
3876 def setup():
3869 x[0] = base.copy()
3877 x[0] = base.copy()
3870 if clearcaches:
3878 if clearcaches:
3871 unfi._revbranchcache = None
3879 unfi._revbranchcache = None
3872 clearchangelog(repo)
3880 clearchangelog(repo)
3873
3881
3874 def bench():
3882 def bench():
3875 x[0].update(targetrepo, newrevs)
3883 x[0].update(targetrepo, newrevs)
3876
3884
3877 timer(bench, setup=setup)
3885 timer(bench, setup=setup)
3878 fm.end()
3886 fm.end()
3879 finally:
3887 finally:
3880 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3888 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3881 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3889 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3882
3890
3883
3891
3884 @command(
3892 @command(
3885 b'perf::branchmapload|perfbranchmapload',
3893 b'perf::branchmapload|perfbranchmapload',
3886 [
3894 [
3887 (b'f', b'filter', b'', b'Specify repoview filter'),
3895 (b'f', b'filter', b'', b'Specify repoview filter'),
3888 (b'', b'list', False, b'List branchmap filter caches'),
3896 (b'', b'list', False, b'List branchmap filter caches'),
3889 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3897 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3890 ]
3898 ]
3891 + formatteropts,
3899 + formatteropts,
3892 )
3900 )
3893 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3901 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3894 """benchmark reading the branchmap"""
3902 """benchmark reading the branchmap"""
3895 opts = _byteskwargs(opts)
3903 opts = _byteskwargs(opts)
3896 clearrevlogs = opts[b'clear_revlogs']
3904 clearrevlogs = opts[b'clear_revlogs']
3897
3905
3898 if list:
3906 if list:
3899 for name, kind, st in repo.cachevfs.readdir(stat=True):
3907 for name, kind, st in repo.cachevfs.readdir(stat=True):
3900 if name.startswith(b'branch2'):
3908 if name.startswith(b'branch2'):
3901 filtername = name.partition(b'-')[2] or b'unfiltered'
3909 filtername = name.partition(b'-')[2] or b'unfiltered'
3902 ui.status(
3910 ui.status(
3903 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3911 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3904 )
3912 )
3905 return
3913 return
3906 if not filter:
3914 if not filter:
3907 filter = None
3915 filter = None
3908 subsettable = getbranchmapsubsettable()
3916 subsettable = getbranchmapsubsettable()
3909 if filter is None:
3917 if filter is None:
3910 repo = repo.unfiltered()
3918 repo = repo.unfiltered()
3911 else:
3919 else:
3912 repo = repoview.repoview(repo, filter)
3920 repo = repoview.repoview(repo, filter)
3913
3921
3914 repo.branchmap() # make sure we have a relevant, up to date branchmap
3922 repo.branchmap() # make sure we have a relevant, up to date branchmap
3915
3923
3916 try:
3924 try:
3917 fromfile = branchmap.branchcache.fromfile
3925 fromfile = branchmap.branchcache.fromfile
3918 except AttributeError:
3926 except AttributeError:
3919 # older versions
3927 # older versions
3920 fromfile = branchmap.read
3928 fromfile = branchmap.read
3921
3929
3922 currentfilter = filter
3930 currentfilter = filter
3923 # try once without timer, the filter may not be cached
3931 # try once without timer, the filter may not be cached
3924 while fromfile(repo) is None:
3932 while fromfile(repo) is None:
3925 currentfilter = subsettable.get(currentfilter)
3933 currentfilter = subsettable.get(currentfilter)
3926 if currentfilter is None:
3934 if currentfilter is None:
3927 raise error.Abort(
3935 raise error.Abort(
3928 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3936 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3929 )
3937 )
3930 repo = repo.filtered(currentfilter)
3938 repo = repo.filtered(currentfilter)
3931 timer, fm = gettimer(ui, opts)
3939 timer, fm = gettimer(ui, opts)
3932
3940
3933 def setup():
3941 def setup():
3934 if clearrevlogs:
3942 if clearrevlogs:
3935 clearchangelog(repo)
3943 clearchangelog(repo)
3936
3944
3937 def bench():
3945 def bench():
3938 fromfile(repo)
3946 fromfile(repo)
3939
3947
3940 timer(bench, setup=setup)
3948 timer(bench, setup=setup)
3941 fm.end()
3949 fm.end()
3942
3950
3943
3951
3944 @command(b'perf::loadmarkers|perfloadmarkers')
3952 @command(b'perf::loadmarkers|perfloadmarkers')
3945 def perfloadmarkers(ui, repo):
3953 def perfloadmarkers(ui, repo):
3946 """benchmark the time to parse the on-disk markers for a repo
3954 """benchmark the time to parse the on-disk markers for a repo
3947
3955
3948 Result is the number of markers in the repo."""
3956 Result is the number of markers in the repo."""
3949 timer, fm = gettimer(ui)
3957 timer, fm = gettimer(ui)
3950 svfs = getsvfs(repo)
3958 svfs = getsvfs(repo)
3951 timer(lambda: len(obsolete.obsstore(repo, svfs)))
3959 timer(lambda: len(obsolete.obsstore(repo, svfs)))
3952 fm.end()
3960 fm.end()
3953
3961
3954
3962
3955 @command(
3963 @command(
3956 b'perf::lrucachedict|perflrucachedict',
3964 b'perf::lrucachedict|perflrucachedict',
3957 formatteropts
3965 formatteropts
3958 + [
3966 + [
3959 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3967 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3960 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3968 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3961 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3969 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3962 (b'', b'size', 4, b'size of cache'),
3970 (b'', b'size', 4, b'size of cache'),
3963 (b'', b'gets', 10000, b'number of key lookups'),
3971 (b'', b'gets', 10000, b'number of key lookups'),
3964 (b'', b'sets', 10000, b'number of key sets'),
3972 (b'', b'sets', 10000, b'number of key sets'),
3965 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3973 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3966 (
3974 (
3967 b'',
3975 b'',
3968 b'mixedgetfreq',
3976 b'mixedgetfreq',
3969 50,
3977 50,
3970 b'frequency of get vs set ops in mixed mode',
3978 b'frequency of get vs set ops in mixed mode',
3971 ),
3979 ),
3972 ],
3980 ],
3973 norepo=True,
3981 norepo=True,
3974 )
3982 )
3975 def perflrucache(
3983 def perflrucache(
3976 ui,
3984 ui,
3977 mincost=0,
3985 mincost=0,
3978 maxcost=100,
3986 maxcost=100,
3979 costlimit=0,
3987 costlimit=0,
3980 size=4,
3988 size=4,
3981 gets=10000,
3989 gets=10000,
3982 sets=10000,
3990 sets=10000,
3983 mixed=10000,
3991 mixed=10000,
3984 mixedgetfreq=50,
3992 mixedgetfreq=50,
3985 **opts
3993 **opts
3986 ):
3994 ):
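# A hypothetical invocation sketch (norepo command, numbers illustrative):
#
#   $ hg perflrucachedict --size 4 --gets 10000 --sets 10000 --mixed 10000
#   $ hg perflrucachedict --costlimit 500 --mincost 1 --maxcost 100
#
# With --costlimit set, the cost-aware get/insert/mixed variants below are
# benchmarked instead of the plain ones.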
3987 opts = _byteskwargs(opts)
3995 opts = _byteskwargs(opts)
3988
3996
3989 def doinit():
3997 def doinit():
3990 for i in _xrange(10000):
3998 for i in _xrange(10000):
3991 util.lrucachedict(size)
3999 util.lrucachedict(size)
3992
4000
3993 costrange = list(range(mincost, maxcost + 1))
4001 costrange = list(range(mincost, maxcost + 1))
3994
4002
3995 values = []
4003 values = []
3996 for i in _xrange(size):
4004 for i in _xrange(size):
3997 values.append(random.randint(0, _maxint))
4005 values.append(random.randint(0, _maxint))
3998
4006
3999 # Get mode fills the cache and tests raw lookup performance with no
4007 # Get mode fills the cache and tests raw lookup performance with no
4000 # eviction.
4008 # eviction.
4001 getseq = []
4009 getseq = []
4002 for i in _xrange(gets):
4010 for i in _xrange(gets):
4003 getseq.append(random.choice(values))
4011 getseq.append(random.choice(values))
4004
4012
4005 def dogets():
4013 def dogets():
4006 d = util.lrucachedict(size)
4014 d = util.lrucachedict(size)
4007 for v in values:
4015 for v in values:
4008 d[v] = v
4016 d[v] = v
4009 for key in getseq:
4017 for key in getseq:
4010 value = d[key]
4018 value = d[key]
4011 value # silence pyflakes warning
4019 value # silence pyflakes warning
4012
4020
4013 def dogetscost():
4021 def dogetscost():
4014 d = util.lrucachedict(size, maxcost=costlimit)
4022 d = util.lrucachedict(size, maxcost=costlimit)
4015 for i, v in enumerate(values):
4023 for i, v in enumerate(values):
4016 d.insert(v, v, cost=costs[i])
4024 d.insert(v, v, cost=costs[i])
4017 for key in getseq:
4025 for key in getseq:
4018 try:
4026 try:
4019 value = d[key]
4027 value = d[key]
4020 value # silence pyflakes warning
4028 value # silence pyflakes warning
4021 except KeyError:
4029 except KeyError:
4022 pass
4030 pass
4023
4031
4024 # Set mode tests insertion speed with cache eviction.
4032 # Set mode tests insertion speed with cache eviction.
4025 setseq = []
4033 setseq = []
4026 costs = []
4034 costs = []
4027 for i in _xrange(sets):
4035 for i in _xrange(sets):
4028 setseq.append(random.randint(0, _maxint))
4036 setseq.append(random.randint(0, _maxint))
4029 costs.append(random.choice(costrange))
4037 costs.append(random.choice(costrange))
4030
4038
4031 def doinserts():
4039 def doinserts():
4032 d = util.lrucachedict(size)
4040 d = util.lrucachedict(size)
4033 for v in setseq:
4041 for v in setseq:
4034 d.insert(v, v)
4042 d.insert(v, v)
4035
4043
4036 def doinsertscost():
4044 def doinsertscost():
4037 d = util.lrucachedict(size, maxcost=costlimit)
4045 d = util.lrucachedict(size, maxcost=costlimit)
4038 for i, v in enumerate(setseq):
4046 for i, v in enumerate(setseq):
4039 d.insert(v, v, cost=costs[i])
4047 d.insert(v, v, cost=costs[i])
4040
4048
4041 def dosets():
4049 def dosets():
4042 d = util.lrucachedict(size)
4050 d = util.lrucachedict(size)
4043 for v in setseq:
4051 for v in setseq:
4044 d[v] = v
4052 d[v] = v
4045
4053
    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

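    # --- Editorial illustration (not part of the original patch) ---
    # Rough shape of the generated workload: about `mixedgetfreq` percent of
    # the operations are gets (op == 0), and keys span twice the cache size,
    # so a sizeable fraction of gets are expected to miss. Hypothetical
    # helper, never called.
    def _example_mixed_summary():
        ngets = sum(1 for op, _key, _cost in mixedops if op == 0)
        return ngets, len(mixedops) - ngets  # (gets, sets)
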
    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


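# --- Editorial note (not part of the original patch) ---
# Each benchmark in `benches` is timed independently through gettimer() and
# timer(), so the command prints one result block per (function, title)
# pair. An illustrative invocation of the cache benchmark defined earlier in
# this file; the option names are inferred from the variables used above
# (size, gets, sets, mixed, costlimit), so check `hg help` for the exact
# spelling:
#
#   $ hg perf::lrucachedict --size 1000 --gets 10000 --costlimit 500

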
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        line = item * nitems + b'\n'

    def benchmark():
        for i in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for i in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()


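# --- Editorial note (not part of the original patch) ---
# Illustrative invocations of perf::write; the option names come from the
# @command declaration above, the values are arbitrary examples:
#
#   $ hg perf::write --nlines 1000 --nitems 50                # item-by-item
#   $ hg perf::write --nlines 1000 --nitems 50 --batch-line   # one write per line
#   $ hg perf::write --write-method status --flush-line       # any ui method
#                                                             # taking bytes

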
def uisetup(ui):
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, the '--dir' option for
        # openrevlog() should cause a failure, because it has only been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)


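# --- Editorial illustration (not part of the original patch) ---
# The wrapper passed to extensions.wrapfunction() always receives the
# original callable as its first argument and may delegate to it, which is
# how the "historical portability" shims in this file are built. A minimal
# sketch; `sometarget` is a hypothetical attribute, so the registration line
# is left commented out:
def _example_wrapper(orig, *args, **kwargs):
    # adjust arguments, or abort on unsupported versions, then delegate
    return orig(*args, **kwargs)


# extensions.wrapfunction(cmdutil, b'sometarget', _example_wrapper)

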
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
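
# --- Editorial note (not part of the original patch) ---
# Because of norepo=True, perf::progress can be run outside any repository,
# e.g. (values are arbitrary examples):
#
#   $ hg perf::progress --topic loading --total 500000
#
# ui.makeprogress() is used as a context manager so the progress bar is
# completed and cleared automatically when the block exits.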