# perf: quiet stdout output in perf::unbundle
# changeset r50339:52f31b66 (branch: default, author: marmoute)
# NOTE(review): the lines above are the retained header of the diff-viewer
# page this file was scraped from, kept as comments so the file stays valid.
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median average. If not set only the best timing is reported
  (default: off).

``presleep``
  number of second to wait before any group of runs (default: 1)

``pre-run``
  number of run to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If benchmark has been running for <time> seconds, and we have performed
      <numberofrun> iterations, stop the benchmark,

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time

import mercurial.revlog
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    profiling = None
try:
    from mercurial.revlogutils import constants as revlog_constants

    # Recent Mercurial requires a (kind, name) pair when opening a revlog;
    # tag all revlogs created by perf with a dedicated marker.
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        """Open a revlog, supplying the mandatory revlog-kind argument."""
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    # Older Mercurial: no revlog-kind concept, pass arguments straight through.
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        """Open a revlog on Mercurial versions without revlog kinds."""
        return mercurial.revlog.revlog(opener, *args, **kwargs)
def identity(a):
    """Return *a* unchanged (no-op fallback for missing pycompat helpers)."""
    return a
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # Very old (Python 2 era) Mercurial: fall back to stdlib equivalents.
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue
# Locate a template-maker: moved from cmdutil to logcmdutil over time,
# and absent entirely on the oldest supported versions (then None).
try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
192 def safehasattr(thing, attr):
192 def safehasattr(thing, attr):
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238 # for "historical portability":
238 # for "historical portability":
239 # define parsealiases locally, because cmdutil.parsealiases has been
239 # define parsealiases locally, because cmdutil.parsealiases has been
240 # available since 1.5 (or 6252852b4332)
240 # available since 1.5 (or 6252852b4332)
241 def parsealiases(cmd):
241 def parsealiases(cmd):
242 return cmd.split(b"|")
242 return cmd.split(b"|")
243
243
244
244
# Select a @command decorator implementation appropriate to the running
# Mercurial: registrar.command (modern), cmdutil.command (3.x, possibly
# wrapped to emulate "norepo"), or a local fallback for the oldest versions.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
# Declare the experimental perf.* config knobs with the config registrar
# when it is available; silently skip on Mercurial versions without it.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (configitem there rejects the `experimental` keyword, so re-register
    # every item without it)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
def getlen(ui):
    """Return a length function; always 1 when perf.stub is set (test mode)."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
class noop:
    """dummy context manager"""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


# shared do-nothing context, used when profiling is disabled
NOOPCTX = noop()
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* once (after optional *setup*) without timing.

    Used in place of _timer when perf.stub is set; *fm* and *title* are
    accepted only for signature compatibility with _timer.
    """
    if setup is not None:
        setup()
    func()
@contextlib.contextmanager
def timeone():
    """Time the `with` body once; yields a list that receives one
    (wall, user, sys) tuple after the body completes."""
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly time *func* until a (elapsed, count) limit is met, then
    report via formatone.

    *setup* runs before every iteration (warm-up and measured alike); only
    the first measured iteration is profiled when *profiler* is given.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs, not measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only profile the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in place.
    Always reports the best run; with *displayall* also reports max, average
    and median.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
610 # utilities for historical portability
610 # utilities for historical portability
611
611
612
612
def getint(ui, section, name, default):
    """Read config option `section.name` as an integer.

    Returns `default` when the option is unset and raises
    `error.ConfigError` when the value is not a valid integer.

    (for "historical portability": ui.configint has only been
    available since 1.9 (or fa2b596db182))
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        msg = b"%s.%s is not an integer ('%s')" % (section, name, raw)
        raise error.ConfigError(msg)
625
625
626
626
def safeattrsetter(obj, name, ignoremissing=False):
    """Return a helper to set and restore attribute `name` on `obj`.

    Aborts if `obj` lacks the attribute at runtime, so that removal of
    an attribute a benchmark relies on is noticed rather than silently
    ignored.  With `ignoremissing=True` a missing attribute yields None
    instead of aborting, which is useful for attributes that only exist
    in some Mercurial versions.

    The returned object offers `set(newvalue)` to assign a new value
    and `restore()` to put the original value back.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    original = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), original)

    return attrutil()
663
663
664
664
665 # utilities to examine each internal API changes
665 # utilities to examine each internal API changes
666
666
667
667
def getbranchmapsubsettable():
    """Locate the `subsettable` mapping across Mercurial versions.

    for "historical portability", subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for module in (branchmap, repoview, repoviewutil):
        table = getattr(module, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686
686
687
687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    for "historical portability": repo.svfs has been available since
    2.3 (or 7034365089bf); fall back to the older `sopener` otherwise.
    """
    store_vfs = getattr(repo, 'svfs', None)
    if store_vfs:
        return store_vfs
    return getattr(repo, 'sopener')
697
697
698
698
def getvfs(repo):
    """Return appropriate object to access files under .hg

    for "historical portability": repo.vfs has been available since
    2.3 (or 7034365089bf); fall back to the older `opener` otherwise.
    """
    working_vfs = getattr(repo, 'vfs', None)
    if working_vfs:
        return working_vfs
    return getattr(repo, 'opener')
708
708
709
709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # older Mercurials keep tags on a plain attribute; resetting it to
    # None via the safe setter is how the cache is dropped there
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738
738
739
739
740 # utilities to clear cache
740 # utilities to clear cache
741
741
742
742
def clearfilecache(obj, attrname):
    """Drop `attrname` from `obj`'s filecache and instance dict.

    When `obj` offers `unfiltered()` (a repository view), the cache is
    cleared on the unfiltered object instead.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750
750
751
751
def clearchangelog(repo):
    """Invalidate the cached changelog of `repo`."""
    unfiltered = repo.unfiltered()
    if repo is not unfiltered:
        # a filtered view keeps its own cache key/value pair; reset both
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfiltered, 'changelog')
757
757
758
758
759 # perf commands
759 # perf commands
760
760
761
761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with a matcher built from PATS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def run():
        walk = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        # materialize the walk so the full traversal is measured
        return len(list(walk))

    timer(run)
    fm.end()
775
775
776
776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file `f` at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
784
784
785
785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # --dirstate: bypass repo.status and time the raw dirstate call
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume every status category so nothing is skipped
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
822
822
823
823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy

    The ui is forced quiet for the duration of the run and restored
    afterwards; the addremove itself is always a dry run.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Save the quiet flag *before* entering the try block: previously it
    # was assigned inside, so a failure on that very line would have made
    # the finally clause raise NameError, masking the original error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # newer scmutil.addremove takes an extra uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841
841
842
842
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs expose an explicit cache-clearing entry point
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        # reset the node->rev lookup cache to its pristine state by hand
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853
853
854
854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # drop changelog caches so every run recomputes from scratch
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
870
870
871
871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark computing the repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        # with --clear-revlogs, also drop the changelog and manifest
        # caches so their re-read is included in the measurement
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
896
896
897
897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        for _ancestor in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
910
910
911
911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor-membership tests for the revisions in REVSET"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            # the membership test itself is the measured work
            rev in ancestors

    timer(run)
    fm.end()
926
926
927
927
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # with a single argument it is the revision; the revlog is then
    # selected through the -c/-m/FILE revlog options
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # reconstruct the information a freshly added revision would carry
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None  # pretend no delta came with the revision
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
991
991
992
992
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    # NOTE(review): unlike sibling perf commands, `opts` is not run
    # through _byteskwargs before reaching gettimer — confirm gettimer
    # copes with native-str keys here.
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path

        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        # older Mercurial without urlutil: fall back to ui.expandpath
        path = ui.expandpath(path)

    def s():
        # re-create the peer in setup so each run starts from a fresh one
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1014
1014
1015
1015
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmarks so each run re-parses them
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1040
1040
1041
1041
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    from mercurial import bundlecaches
    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # message fixed: previously read "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # outgoing = everything reachable from the selected revs but not
    # from the heads outside the selection
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = bundlecaches.parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params[b"cg.version"]
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull so only the generation cost is measured
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1132
1132
1133
1133
1134 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1134 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1135 def perfbundleread(ui, repo, bundlepath, **opts):
1135 def perfbundleread(ui, repo, bundlepath, **opts):
1136 """Benchmark reading of bundle files.
1136 """Benchmark reading of bundle files.
1137
1137
1138 This command is meant to isolate the I/O part of bundle reading as
1138 This command is meant to isolate the I/O part of bundle reading as
1139 much as possible.
1139 much as possible.
1140 """
1140 """
1141 from mercurial import (
1141 from mercurial import (
1142 bundle2,
1142 bundle2,
1143 exchange,
1143 exchange,
1144 streamclone,
1144 streamclone,
1145 )
1145 )
1146
1146
1147 opts = _byteskwargs(opts)
1147 opts = _byteskwargs(opts)
1148
1148
1149 def makebench(fn):
1149 def makebench(fn):
1150 def run():
1150 def run():
1151 with open(bundlepath, b'rb') as fh:
1151 with open(bundlepath, b'rb') as fh:
1152 bundle = exchange.readbundle(ui, fh, bundlepath)
1152 bundle = exchange.readbundle(ui, fh, bundlepath)
1153 fn(bundle)
1153 fn(bundle)
1154
1154
1155 return run
1155 return run
1156
1156
1157 def makereadnbytes(size):
1157 def makereadnbytes(size):
1158 def run():
1158 def run():
1159 with open(bundlepath, b'rb') as fh:
1159 with open(bundlepath, b'rb') as fh:
1160 bundle = exchange.readbundle(ui, fh, bundlepath)
1160 bundle = exchange.readbundle(ui, fh, bundlepath)
1161 while bundle.read(size):
1161 while bundle.read(size):
1162 pass
1162 pass
1163
1163
1164 return run
1164 return run
1165
1165
1166 def makestdioread(size):
1166 def makestdioread(size):
1167 def run():
1167 def run():
1168 with open(bundlepath, b'rb') as fh:
1168 with open(bundlepath, b'rb') as fh:
1169 while fh.read(size):
1169 while fh.read(size):
1170 pass
1170 pass
1171
1171
1172 return run
1172 return run
1173
1173
1174 # bundle1
1174 # bundle1
1175
1175
1176 def deltaiter(bundle):
1176 def deltaiter(bundle):
1177 for delta in bundle.deltaiter():
1177 for delta in bundle.deltaiter():
1178 pass
1178 pass
1179
1179
1180 def iterchunks(bundle):
1180 def iterchunks(bundle):
1181 for chunk in bundle.getchunks():
1181 for chunk in bundle.getchunks():
1182 pass
1182 pass
1183
1183
1184 # bundle2
1184 # bundle2
1185
1185
1186 def forwardchunks(bundle):
1186 def forwardchunks(bundle):
1187 for chunk in bundle._forwardchunks():
1187 for chunk in bundle._forwardchunks():
1188 pass
1188 pass
1189
1189
1190 def iterparts(bundle):
1190 def iterparts(bundle):
1191 for part in bundle.iterparts():
1191 for part in bundle.iterparts():
1192 pass
1192 pass
1193
1193
1194 def iterpartsseekable(bundle):
1194 def iterpartsseekable(bundle):
1195 for part in bundle.iterparts(seekable=True):
1195 for part in bundle.iterparts(seekable=True):
1196 pass
1196 pass
1197
1197
1198 def seek(bundle):
1198 def seek(bundle):
1199 for part in bundle.iterparts(seekable=True):
1199 for part in bundle.iterparts(seekable=True):
1200 part.seek(0, os.SEEK_END)
1200 part.seek(0, os.SEEK_END)
1201
1201
1202 def makepartreadnbytes(size):
1202 def makepartreadnbytes(size):
1203 def run():
1203 def run():
1204 with open(bundlepath, b'rb') as fh:
1204 with open(bundlepath, b'rb') as fh:
1205 bundle = exchange.readbundle(ui, fh, bundlepath)
1205 bundle = exchange.readbundle(ui, fh, bundlepath)
1206 for part in bundle.iterparts():
1206 for part in bundle.iterparts():
1207 while part.read(size):
1207 while part.read(size):
1208 pass
1208 pass
1209
1209
1210 return run
1210 return run
1211
1211
1212 benches = [
1212 benches = [
1213 (makestdioread(8192), b'read(8k)'),
1213 (makestdioread(8192), b'read(8k)'),
1214 (makestdioread(16384), b'read(16k)'),
1214 (makestdioread(16384), b'read(16k)'),
1215 (makestdioread(32768), b'read(32k)'),
1215 (makestdioread(32768), b'read(32k)'),
1216 (makestdioread(131072), b'read(128k)'),
1216 (makestdioread(131072), b'read(128k)'),
1217 ]
1217 ]
1218
1218
1219 with open(bundlepath, b'rb') as fh:
1219 with open(bundlepath, b'rb') as fh:
1220 bundle = exchange.readbundle(ui, fh, bundlepath)
1220 bundle = exchange.readbundle(ui, fh, bundlepath)
1221
1221
1222 if isinstance(bundle, changegroup.cg1unpacker):
1222 if isinstance(bundle, changegroup.cg1unpacker):
1223 benches.extend(
1223 benches.extend(
1224 [
1224 [
1225 (makebench(deltaiter), b'cg1 deltaiter()'),
1225 (makebench(deltaiter), b'cg1 deltaiter()'),
1226 (makebench(iterchunks), b'cg1 getchunks()'),
1226 (makebench(iterchunks), b'cg1 getchunks()'),
1227 (makereadnbytes(8192), b'cg1 read(8k)'),
1227 (makereadnbytes(8192), b'cg1 read(8k)'),
1228 (makereadnbytes(16384), b'cg1 read(16k)'),
1228 (makereadnbytes(16384), b'cg1 read(16k)'),
1229 (makereadnbytes(32768), b'cg1 read(32k)'),
1229 (makereadnbytes(32768), b'cg1 read(32k)'),
1230 (makereadnbytes(131072), b'cg1 read(128k)'),
1230 (makereadnbytes(131072), b'cg1 read(128k)'),
1231 ]
1231 ]
1232 )
1232 )
1233 elif isinstance(bundle, bundle2.unbundle20):
1233 elif isinstance(bundle, bundle2.unbundle20):
1234 benches.extend(
1234 benches.extend(
1235 [
1235 [
1236 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1236 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1237 (makebench(iterparts), b'bundle2 iterparts()'),
1237 (makebench(iterparts), b'bundle2 iterparts()'),
1238 (
1238 (
1239 makebench(iterpartsseekable),
1239 makebench(iterpartsseekable),
1240 b'bundle2 iterparts() seekable',
1240 b'bundle2 iterparts() seekable',
1241 ),
1241 ),
1242 (makebench(seek), b'bundle2 part seek()'),
1242 (makebench(seek), b'bundle2 part seek()'),
1243 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1243 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1244 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1244 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1245 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1245 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1246 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1246 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1247 ]
1247 ]
1248 )
1248 )
1249 elif isinstance(bundle, streamclone.streamcloneapplier):
1249 elif isinstance(bundle, streamclone.streamcloneapplier):
1250 raise error.Abort(b'stream clone bundles not supported')
1250 raise error.Abort(b'stream clone bundles not supported')
1251 else:
1251 else:
1252 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1252 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1253
1253
1254 for fn, title in benches:
1254 for fn, title in benches:
1255 timer, fm = gettimer(ui, opts)
1255 timer, fm = gettimer(ui, opts)
1256 timer(fn, title=title)
1256 timer(fn, title=title)
1257 fm.end()
1257 fm.end()
1258
1258
1259
1259
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # generate and exhaust the changelog chunk stream; the chunks
        # themselves are discarded since only the timing matters
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1295
1295
1296
1296
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call, rebuilding the dirs cache each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # make sure the dirstate is loaded before any measurement starts
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the directory cache so the next call rebuilds it
        # (the cache attribute is absent on some map implementations)
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()
1313
1313
1314
1314
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate is loaded before any measurement starts
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            # time a plain walk over all tracked files
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            # drop the in-memory dirstate so `d` measures a cold load
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1377
1377
1378
1378
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate is loaded before any measurement starts
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the directory cache so each run starts cold
        # (the cache attribute is absent on some map implementations)
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1397
1397
1398
1398
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate and its filefoldmap before measuring
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached filefoldmap so each run rebuilds it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1418
1418
1419
1419
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate and its dirfoldmap before measuring
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        # also drop the directory cache so the rebuild starts fully cold
        # (the cache attribute is absent on some map implementations)
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1443
1443
1444
1444
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before any measurement starts
    b"a" in ds

    def setup():
        # mark the dirstate dirty so `write` actually performs a write
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1461
1461
1462
1462
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1484
1484
1485
1485
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the runtime of `merge.calculateupdates`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1517
1517
1518
1518
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # note: unlike perfmergecalculate there is no prompt to suppress here;
        # mergecopies only computes the copy mapping between the revisions
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1541
1541
1542
1542
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside of the timed function
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1556
1556
1557
1557
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, also pay the cost of re-reading the phase data
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1582
1582
1583
1583
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # fall back for indexes without `has_node` (older Mercurial)
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1642
1642
1643
1643
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` names a changeset; use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node of a manifest revision
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # compatibility with older manifestlog implementations
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1687
1687
1688
1688
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading the changelog entry of a single revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1701
1701
1702
1702
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached ignore matcher so each run parses it from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1719
1719
1720
1720
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1783
1783
1784
1784
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # prefer the index-level get_rev API when present, otherwise fall
        # back to the legacy nodemap mapping
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        # the timed body: one lookup per requested node
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        # rebuild a cold changelog/nodemap before every timed run

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1855
1855
1856
1856
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version -q` invocation."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run_once():
        if os.name == 'nt':
            # os.system() cannot unset a variable inline on Windows, so
            # neutralize HGRCPATH via the environment instead.
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])
        else:
            hgcmd = fsencode(sys.argv[0])
            os.system(b"HGRCPATH= %s version -q > /dev/null" % hgcmd)

    timer(run_once)
    fm.end()
1873
1873
1874
1874
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if count > len(repo.changelog):
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(rev) for rev in _xrange(count)]

    def fetch_parents():
        # deliberately go through the full repo.changelog attribute chain on
        # every lookup, as the original benchmark does
        for node in nodes:
            repo.changelog.parents(node)

    timer(fetch_parents)
    fm.end()
1900
1900
1901
1901
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark listing the files touched by a changeset via its context."""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def list_files():
        len(repo[rev].files())

    timer(list_files)
    fm.end()
1913
1913
1914
1914
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from raw changelog data."""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def read_files():
        # field 3 of a raw changelog entry is the list of touched files
        len(cl.read(rev)[3])

    timer(read_files)
    fm.end()
1927
1927
1928
1928
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision symbol to its node."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(repo.lookup(rev))

    timer(resolve)
    fm.end()
1935
1935
1936
1936
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark replaying a deterministic pseudo-random edit sequence
    through a fresh linelog."""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the generated edit sequence is identical on every run, so
    # timings are comparable across invocations
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # a1..a2 is the replaced range within the current file size,
        # b1..b2 is the replacement range contributed by revision `rev`
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        # track the simulated file length so future hunks stay in bounds
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        # timed body: apply every pre-computed edit to a brand new linelog
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1974
1974
1975
1975
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the helper to a local so attribute lookup stays out of the loop
    resolve = scmutil.revrange
    timer(lambda: len(resolve(repo, specs)))
    fm.end()
1983
1983
1984
1984
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in a freshly-constructed changelog revlog."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        # older Mercurial revlog constructors take `indexfile` instead of
        # `radix`; fall back for compatibility with those versions
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # drop the revlog caches so every timed iteration starts cold
        clearcaches(cl)

    timer(d)
    fm.end()
2005
2005
2006
2006
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark the `hg log` command, with its output captured."""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer ui output so printing does not pollute the measurement
    ui.pushbuffer()

    def run_log():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run_log)
    ui.popbuffer()
    fm.end()
2024
2024
2025
2025
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        lastrev = len(repo) - 1
        for rev in repo.changelog.revs(start=lastrev, stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
2042
2042
2043
2043
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # render into /dev/null so terminal output cost does not skew the timing
    nullui.fout = open(os.devnull, 'wb')
    try:
        nullui.disablepager()
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            # timed body: render every requested revision with the template
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # the previous code leaked this handle; make sure the devnull sink
        # is closed even when revset resolution or rendering raises
        nullui.fout.close()
2086
2086
2087
2087
def _displaystats(ui, opts, entries, data):
    """render percentile statistics for each measured data series.

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each key
    to a list of tuples whose first element is the measured value. The value
    lists are sorted in place before being summarized.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # percentile indices must be based on the number of collected samples
        # for this series; the previous `len(data)` used the number of series
        # in the dict, which pinned every percentile near the minimum.
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2132
2132
2133
2133
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, printf-style cell format) pairs for the table output
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing, drop every timing- and rename-related column
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw samples, later summarized by _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant for merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # NOTE(review): `data` is built with bytes keys (b'p1.nbrevs')
            # but read back below with str keys — verify this is consistent
            # with how the surrounding helpers expect it on Python 3.
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2315
2315
2316
2316
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command finds source-destination pairs relevant for copy-tracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # Two output layouts: the timing variant carries two extra columns
    # (rename count and elapsed time).
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    # Default to scanning the whole repository when no revset is given.
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # Accumulators for _displaystats: one list of
        # (value, source-hex, destination-hex) tuples per metric.
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # Only merge revisions are interesting: they give (base, parent) pairs
    # on which copy tracing is actually exercised.
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no file to trace between this pair; skip it entirely
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    # NOTE(review): `data` is keyed with bytes (b'nbrevs')
                    # but looked up here with str keys — presumably relies
                    # on a source transformation or py2 semantics; confirm
                    # before touching.
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # Human-readable line mirrors the formatter data, with the
                # node hashes run through the formatter's hex function.
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2455
2455
2456
2456
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """Benchmark construction of a case-collision auditor."""
    byteopts = _byteskwargs(opts)
    timer, fm = gettimer(ui, byteopts)

    def build_auditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2463
2463
2464
2464
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """Benchmark loading the fncache file from the store."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # each run re-reads and re-parses the fncache from disk
    timer(lambda: store.fncache._load())
    fm.end()
2476
2476
2477
2477
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """Benchmark writing the fncache file.

    Takes the store lock, opens a throw-away transaction that backs up the
    existing fncache, then times repeated rewrites of it.

    Fix over the previous version: the transaction and the repository lock
    were leaked when ``tr.addbackup``/``timer`` raised — the lock was never
    released on the error path.  Cleanup is now done in ``try``/``finally``.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            # back up the real fncache so the benchmark leaves no trace
            tr.addbackup(b'fncache')

            def d():
                # force the dirty flag each run so write() actually
                # rewrites the file instead of short-circuiting
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        except Exception:
            # roll back (restores the backed-up fncache) before re-raising
            tr.abort()
            raise
    finally:
        lock.release()
    fm.end()
2496
2496
2497
2497
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """Benchmark path encoding of every entry in the fncache."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once up-front so only the encoding itself is timed
    store.fncache._load()

    def encode_all_entries():
        for entry in store.fncache.entries:
            store.encode(entry)

    timer(encode_all_entries)
    fm.end()
2511
2511
2512
2512
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker-thread body for ``perfbdiff --threads``.
    #
    # q:      queue of (text1, text2) pairs; a None item marks the end of a
    #         benchmark round
    # blocks: diff into block lists instead of a text delta
    # xdiff:  use the xdiff algorithm (implies block output)
    # ready:  condition the main thread notifies to start the next round
    # done:   event set by the main thread to shut the worker down
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            # every q.get() is balanced by a task_done() so q.join() in the
            # main thread can tell when a round is finished
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        # round complete: sleep until the main thread kicks off another
        with ready:
            ready.wait()
2528
2528
2529
2529
def _manifestrevision(repo, mnode):
    """Return the raw manifest revision identified by node *mnode*."""
    manifestlog = repo.manifestlog

    # Modern Mercurial exposes getstorage(); older versions only have the
    # private _revlog attribute.
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
2539
2539
2540
2540
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # -c/-m style invocation: the positional FILE is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # Gather all (old, new) text pairs up-front so only the diffing itself
    # is inside the timed function.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # multi-threaded: feed pairs to _bdiffworker threads through a queue.
        # One None per thread ends each round; `ready` wakes the workers for
        # the next round and `done` eventually shuts them down.
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        # wait for the workers to drain the initial None markers
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # tear the worker threads down: signal shutdown, unblock q.get(),
        # then wake anyone waiting on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2655
2655
2656
2656
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing

    The ui is forced quiet for the duration of the benchmark so progress
    output does not pollute the timings, and restored afterwards.
    """
    from mercurial import exchange
    from mercurial import bundle2

    with repo.lock():
        # [bundle generator, open transaction] shared between setup/apply
        bundle = [None, None]
        orig_quiet = repo.ui.quiet
        try:
            repo.ui.quiet = True
            with open(fname, mode="rb") as f:

                def noop_report(*args, **kwargs):
                    pass

                def setup():
                    # abort any transaction left over from the previous run,
                    # then rewind the bundle and open a fresh transaction
                    gen, tr = bundle
                    if tr is not None:
                        tr.abort()
                    bundle[:] = [None, None]
                    f.seek(0)
                    bundle[0] = exchange.readbundle(ui, f, fname)
                    bundle[1] = repo.transaction(b'perf::unbundle')
                    bundle[1]._report = noop_report  # silence the transaction

                def apply():
                    gen, tr = bundle
                    bundle2.applybundle(
                        repo,
                        gen,
                        tr,
                        source=b'perf::unbundle',
                        url=fname,
                    )

                timer, fm = gettimer(ui, opts)
                timer(apply, setup=setup)
                fm.end()
        finally:
            # BUG FIX: this previously read `repo.ui.quiet == orig_quiet`,
            # a no-op comparison, so the ui stayed quiet after the command.
            repo.ui.quiet = orig_quiet
            gen, tr = bundle
            if tr is not None:
                tr.abort()
2705
2707
2706
2708
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # -c/-m style invocation: the positional FILE is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # Gather all (old, new) text pairs up-front so only the diffing itself
    # is inside the timed function.
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2785
2787
2786
2788
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter diff flag to the keyword argument it enables
    flag_names = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark `hg diff` once per whitespace-option combination
    for combo in ('', 'w', 'b', 'B', 'wB'):
        diff_kwargs = {flag_names[flag]: b'1' for flag in combo}

        def run(kwargs=diff_kwargs):
            # buffer the output so printing does not skew the timing
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        encoded = combo.encode('ascii')
        if encoded:
            title = b'diffopts: %s' % (b'-' + encoded)
        else:
            title = b'diffopts: none'
        timer(run, title=title)
    fm.end()
2810
2812
2811
2813
2812 @command(
2814 @command(
2813 b'perf::revlogindex|perfrevlogindex',
2815 b'perf::revlogindex|perfrevlogindex',
2814 revlogopts + formatteropts,
2816 revlogopts + formatteropts,
2815 b'-c|-m|FILE',
2817 b'-c|-m|FILE',
2816 )
2818 )
2817 def perfrevlogindex(ui, repo, file_=None, **opts):
2819 def perfrevlogindex(ui, repo, file_=None, **opts):
2818 """Benchmark operations against a revlog index.
2820 """Benchmark operations against a revlog index.
2819
2821
2820 This tests constructing a revlog instance, reading index data,
2822 This tests constructing a revlog instance, reading index data,
2821 parsing index data, and performing various operations related to
2823 parsing index data, and performing various operations related to
2822 index data.
2824 index data.
2823 """
2825 """
2824
2826
2825 opts = _byteskwargs(opts)
2827 opts = _byteskwargs(opts)
2826
2828
2827 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2829 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2828
2830
2829 opener = getattr(rl, 'opener') # trick linter
2831 opener = getattr(rl, 'opener') # trick linter
2830 # compat with hg <= 5.8
2832 # compat with hg <= 5.8
2831 radix = getattr(rl, 'radix', None)
2833 radix = getattr(rl, 'radix', None)
2832 indexfile = getattr(rl, '_indexfile', None)
2834 indexfile = getattr(rl, '_indexfile', None)
2833 if indexfile is None:
2835 if indexfile is None:
2834 # compatibility with <= hg-5.8
2836 # compatibility with <= hg-5.8
2835 indexfile = getattr(rl, 'indexfile')
2837 indexfile = getattr(rl, 'indexfile')
2836 data = opener.read(indexfile)
2838 data = opener.read(indexfile)
2837
2839
2838 header = struct.unpack(b'>I', data[0:4])[0]
2840 header = struct.unpack(b'>I', data[0:4])[0]
2839 version = header & 0xFFFF
2841 version = header & 0xFFFF
2840 if version == 1:
2842 if version == 1:
2841 inline = header & (1 << 16)
2843 inline = header & (1 << 16)
2842 else:
2844 else:
2843 raise error.Abort(b'unsupported revlog version: %d' % version)
2845 raise error.Abort(b'unsupported revlog version: %d' % version)
2844
2846
2845 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2847 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2846 if parse_index_v1 is None:
2848 if parse_index_v1 is None:
2847 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2849 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2848
2850
2849 rllen = len(rl)
2851 rllen = len(rl)
2850
2852
2851 node0 = rl.node(0)
2853 node0 = rl.node(0)
2852 node25 = rl.node(rllen // 4)
2854 node25 = rl.node(rllen // 4)
2853 node50 = rl.node(rllen // 2)
2855 node50 = rl.node(rllen // 2)
2854 node75 = rl.node(rllen // 4 * 3)
2856 node75 = rl.node(rllen // 4 * 3)
2855 node100 = rl.node(rllen - 1)
2857 node100 = rl.node(rllen - 1)
2856
2858
2857 allrevs = range(rllen)
2859 allrevs = range(rllen)
2858 allrevsrev = list(reversed(allrevs))
2860 allrevsrev = list(reversed(allrevs))
2859 allnodes = [rl.node(rev) for rev in range(rllen)]
2861 allnodes = [rl.node(rev) for rev in range(rllen)]
2860 allnodesrev = list(reversed(allnodes))
2862 allnodesrev = list(reversed(allnodes))
2861
2863
2862 def constructor():
2864 def constructor():
2863 if radix is not None:
2865 if radix is not None:
2864 revlog(opener, radix=radix)
2866 revlog(opener, radix=radix)
2865 else:
2867 else:
2866 # hg <= 5.8
2868 # hg <= 5.8
2867 revlog(opener, indexfile=indexfile)
2869 revlog(opener, indexfile=indexfile)
2868
2870
2869 def read():
2871 def read():
2870 with opener(indexfile) as fh:
2872 with opener(indexfile) as fh:
2871 fh.read()
2873 fh.read()
2872
2874
2873 def parseindex():
2875 def parseindex():
2874 parse_index_v1(data, inline)
2876 parse_index_v1(data, inline)
2875
2877
2876 def getentry(revornode):
2878 def getentry(revornode):
2877 index = parse_index_v1(data, inline)[0]
2879 index = parse_index_v1(data, inline)[0]
2878 index[revornode]
2880 index[revornode]
2879
2881
2880 def getentries(revs, count=1):
2882 def getentries(revs, count=1):
2881 index = parse_index_v1(data, inline)[0]
2883 index = parse_index_v1(data, inline)[0]
2882
2884
2883 for i in range(count):
2885 for i in range(count):
2884 for rev in revs:
2886 for rev in revs:
2885 index[rev]
2887 index[rev]
2886
2888
2887 def resolvenode(node):
2889 def resolvenode(node):
2888 index = parse_index_v1(data, inline)[0]
2890 index = parse_index_v1(data, inline)[0]
2889 rev = getattr(index, 'rev', None)
2891 rev = getattr(index, 'rev', None)
2890 if rev is None:
2892 if rev is None:
2891 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2893 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2892 # This only works for the C code.
2894 # This only works for the C code.
2893 if nodemap is None:
2895 if nodemap is None:
2894 return
2896 return
2895 rev = nodemap.__getitem__
2897 rev = nodemap.__getitem__
2896
2898
2897 try:
2899 try:
2898 rev(node)
2900 rev(node)
2899 except error.RevlogError:
2901 except error.RevlogError:
2900 pass
2902 pass
2901
2903
2902 def resolvenodes(nodes, count=1):
2904 def resolvenodes(nodes, count=1):
2903 index = parse_index_v1(data, inline)[0]
2905 index = parse_index_v1(data, inline)[0]
2904 rev = getattr(index, 'rev', None)
2906 rev = getattr(index, 'rev', None)
2905 if rev is None:
2907 if rev is None:
2906 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2908 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2907 # This only works for the C code.
2909 # This only works for the C code.
2908 if nodemap is None:
2910 if nodemap is None:
2909 return
2911 return
2910 rev = nodemap.__getitem__
2912 rev = nodemap.__getitem__
2911
2913
2912 for i in range(count):
2914 for i in range(count):
2913 for node in nodes:
2915 for node in nodes:
2914 try:
2916 try:
2915 rev(node)
2917 rev(node)
2916 except error.RevlogError:
2918 except error.RevlogError:
2917 pass
2919 pass
2918
2920
2919 benches = [
2921 benches = [
2920 (constructor, b'revlog constructor'),
2922 (constructor, b'revlog constructor'),
2921 (read, b'read'),
2923 (read, b'read'),
2922 (parseindex, b'create index object'),
2924 (parseindex, b'create index object'),
2923 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2925 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2924 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2926 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2925 (lambda: resolvenode(node0), b'look up node at rev 0'),
2927 (lambda: resolvenode(node0), b'look up node at rev 0'),
2926 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2928 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2927 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2929 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2928 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2930 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2929 (lambda: resolvenode(node100), b'look up node at tip'),
2931 (lambda: resolvenode(node100), b'look up node at tip'),
2930 # 2x variation is to measure caching impact.
2932 # 2x variation is to measure caching impact.
2931 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2933 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2932 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2934 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2933 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2935 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2934 (
2936 (
2935 lambda: resolvenodes(allnodesrev, 2),
2937 lambda: resolvenodes(allnodesrev, 2),
2936 b'look up all nodes 2x (reverse)',
2938 b'look up all nodes 2x (reverse)',
2937 ),
2939 ),
2938 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2940 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2939 (
2941 (
2940 lambda: getentries(allrevs, 2),
2942 lambda: getentries(allrevs, 2),
2941 b'retrieve all index entries 2x (forward)',
2943 b'retrieve all index entries 2x (forward)',
2942 ),
2944 ),
2943 (
2945 (
2944 lambda: getentries(allrevsrev),
2946 lambda: getentries(allrevsrev),
2945 b'retrieve all index entries (reverse)',
2947 b'retrieve all index entries (reverse)',
2946 ),
2948 ),
2947 (
2949 (
2948 lambda: getentries(allrevsrev, 2),
2950 lambda: getentries(allrevsrev, 2),
2949 b'retrieve all index entries 2x (reverse)',
2951 b'retrieve all index entries 2x (reverse)',
2950 ),
2952 ),
2951 ]
2953 ]
2952
2954
2953 for fn, title in benches:
2955 for fn, title in benches:
2954 timer, fm = gettimer(ui, opts)
2956 timer, fm = gettimer(ui, opts)
2955 timer(fn, title=title)
2957 timer(fn, title=title)
2956 fm.end()
2958 fm.end()
2957
2959
2958
2960
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev += rllen

    def d():
        # start from a cold state so each timed run is comparable
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last = rllen - 1, startrev - 1
            step = -step
        else:
            first, last = startrev, rllen

        for revnum in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(revnum))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3007
3009
3008
3010
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # all passes iterate the same revisions in the same order, so entry
    # ``idx`` of every pass refers to the same revision
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the 50th percentile was previously computed with
        # ``resultcount * 70 // 100``
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3150
3152
3151
3153
3152 class _faketr:
3154 class _faketr:
3153 def add(s, x, y, z=None):
3155 def add(s, x, y, z=None):
3154 return None
3156 return None
3155
3157
3156
3158
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions ``startrev``..``stoprev`` of ``orig`` to a throwaway
    truncated copy, timing each ``addrawrevision`` call individually.

    ``source`` selects how each revision's data is fed (see perfrevlogwrite).
    ``runidx`` only labels the progress topic for multi-pass runs.
    Returns a list of ``(rev, timing)`` pairs, where ``timing`` is the value
    captured by the ``timeone`` context for that single write.
    """
    timings = []
    # addrawrevision wants a transaction to record journal entries on; the
    # benchmark needs no rollback support, so a no-op stand-in is enough
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:
            # older hg versions drive progress directly through ui.progress;
            # passing pos=None marks the topic as finished

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # start each write from a cold state so the timings for
                # individual revisions are comparable
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed; seed
            # preparation and cache clearing stay outside the clock
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            # r[0] is populated when the timeone context exits
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3206
3208
3207
3209
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair to feed ``addrawrevision`` for
    revision ``rev`` of ``orig``, according to the requested ``source``
    strategy (``full``, ``parent-1``, ``parent-2``, ``parent-smallest`` or
    ``storage`` — see perfrevlogwrite).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        # feed the fulltext; the revlog recomputes its own delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # start from p1's delta; switch to p2's only when strictly smaller
        base = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(otherdiff) < len(diff):
                base = p2
                diff = otherdiff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        # reuse whatever delta base the existing revlog picked
        base = orig.deltaparent(rev)
        cachedelta = (base, orig.revdiff(orig.node(base), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3248
3250
3249
3251
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog ``orig`` truncated at ``truncaterev``.

    The index and data files are copied into a temporary directory and
    truncated so that revisions >= ``truncaterev`` are absent, then a fresh
    revlog is instantiated on top of them.  The temporary directory is
    removed when the context exits.
    """
    from mercurial import vfs as vfsmod

    # the truncation arithmetic below assumes separate index/data files
    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward this optional constructor argument when the running hg
    # version supports it
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # same attribute-rename dance for the data file
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries are fixed-size, so the cut point is a simple
            # multiple of the per-entry size
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            # orig.start() gives the data-file offset of the revision
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern constructor takes a file-name radix ...
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # ... older versions take explicit index/data names instead
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3310
3312
3311
3313
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every engine that is available and actually implements
        # revlog compression
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Open a raw file handle on the revlog storage: the index file for
        # inline revlogs, the data file otherwise.  Attribute names changed
        # across hg versions, hence the getattr fallbacks.
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # BUG FIX: this previously looked up 'datafile' twice
            # (``getattr(rl, 'datafile', getattr(rl, 'datafile'))``), never
            # trying the modern '_datafile' attribute; now consistent with
            # the lookup in _temprevlog.
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        # the batch chunk bench must run before the compression benches: it
        # populates ``chunks[0]`` which docompress consumes
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3444
3446
3445
3447
3446 @command(
3448 @command(
3447 b'perf::revlogrevision|perfrevlogrevision',
3449 b'perf::revlogrevision|perfrevlogrevision',
3448 revlogopts
3450 revlogopts
3449 + formatteropts
3451 + formatteropts
3450 + [(b'', b'cache', False, b'use caches instead of clearing')],
3452 + [(b'', b'cache', False, b'use caches instead of clearing')],
3451 b'-c|-m|FILE REV',
3453 b'-c|-m|FILE REV',
3452 )
3454 )
3453 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3455 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3454 """Benchmark obtaining a revlog revision.
3456 """Benchmark obtaining a revlog revision.
3455
3457
3456 Obtaining a revlog revision consists of roughly the following steps:
3458 Obtaining a revlog revision consists of roughly the following steps:
3457
3459
3458 1. Compute the delta chain
3460 1. Compute the delta chain
3459 2. Slice the delta chain if applicable
3461 2. Slice the delta chain if applicable
3460 3. Obtain the raw chunks for that delta chain
3462 3. Obtain the raw chunks for that delta chain
3461 4. Decompress each raw chunk
3463 4. Decompress each raw chunk
3462 5. Apply binary patches to obtain fulltext
3464 5. Apply binary patches to obtain fulltext
3463 6. Verify hash of fulltext
3465 6. Verify hash of fulltext
3464
3466
3465 This command measures the time spent in each of these phases.
3467 This command measures the time spent in each of these phases.
3466 """
3468 """
3467 opts = _byteskwargs(opts)
3469 opts = _byteskwargs(opts)
3468
3470
3469 if opts.get(b'changelog') or opts.get(b'manifest'):
3471 if opts.get(b'changelog') or opts.get(b'manifest'):
3470 file_, rev = None, file_
3472 file_, rev = None, file_
3471 elif rev is None:
3473 elif rev is None:
3472 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3474 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3473
3475
3474 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3476 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3475
3477
3476 # _chunkraw was renamed to _getsegmentforrevs.
3478 # _chunkraw was renamed to _getsegmentforrevs.
3477 try:
3479 try:
3478 segmentforrevs = r._getsegmentforrevs
3480 segmentforrevs = r._getsegmentforrevs
3479 except AttributeError:
3481 except AttributeError:
3480 segmentforrevs = r._chunkraw
3482 segmentforrevs = r._chunkraw
3481
3483
3482 node = r.lookup(rev)
3484 node = r.lookup(rev)
3483 rev = r.rev(node)
3485 rev = r.rev(node)
3484
3486
3485 def getrawchunks(data, chain):
3487 def getrawchunks(data, chain):
3486 start = r.start
3488 start = r.start
3487 length = r.length
3489 length = r.length
3488 inline = r._inline
3490 inline = r._inline
3489 try:
3491 try:
3490 iosize = r.index.entry_size
3492 iosize = r.index.entry_size
3491 except AttributeError:
3493 except AttributeError:
3492 iosize = r._io.size
3494 iosize = r._io.size
3493 buffer = util.buffer
3495 buffer = util.buffer
3494
3496
3495 chunks = []
3497 chunks = []
3496 ladd = chunks.append
3498 ladd = chunks.append
3497 for idx, item in enumerate(chain):
3499 for idx, item in enumerate(chain):
3498 offset = start(item[0])
3500 offset = start(item[0])
3499 bits = data[idx]
3501 bits = data[idx]
3500 for rev in item:
3502 for rev in item:
3501 chunkstart = start(rev)
3503 chunkstart = start(rev)
3502 if inline:
3504 if inline:
3503 chunkstart += (rev + 1) * iosize
3505 chunkstart += (rev + 1) * iosize
3504 chunklength = length(rev)
3506 chunklength = length(rev)
3505 ladd(buffer(bits, chunkstart - offset, chunklength))
3507 ladd(buffer(bits, chunkstart - offset, chunklength))
3506
3508
3507 return chunks
3509 return chunks
3508
3510
3509 def dodeltachain(rev):
3511 def dodeltachain(rev):
3510 if not cache:
3512 if not cache:
3511 r.clearcaches()
3513 r.clearcaches()
3512 r._deltachain(rev)
3514 r._deltachain(rev)
3513
3515
3514 def doread(chain):
3516 def doread(chain):
3515 if not cache:
3517 if not cache:
3516 r.clearcaches()
3518 r.clearcaches()
3517 for item in slicedchain:
3519 for item in slicedchain:
3518 segmentforrevs(item[0], item[-1])
3520 segmentforrevs(item[0], item[-1])
3519
3521
3520 def doslice(r, chain, size):
3522 def doslice(r, chain, size):
3521 for s in slicechunk(r, chain, targetsize=size):
3523 for s in slicechunk(r, chain, targetsize=size):
3522 pass
3524 pass
3523
3525
3524 def dorawchunks(data, chain):
3526 def dorawchunks(data, chain):
3525 if not cache:
3527 if not cache:
3526 r.clearcaches()
3528 r.clearcaches()
3527 getrawchunks(data, chain)
3529 getrawchunks(data, chain)
3528
3530
3529 def dodecompress(chunks):
3531 def dodecompress(chunks):
3530 decomp = r.decompress
3532 decomp = r.decompress
3531 for chunk in chunks:
3533 for chunk in chunks:
3532 decomp(chunk)
3534 decomp(chunk)
3533
3535
3534 def dopatch(text, bins):
3536 def dopatch(text, bins):
3535 if not cache:
3537 if not cache:
3536 r.clearcaches()
3538 r.clearcaches()
3537 mdiff.patches(text, bins)
3539 mdiff.patches(text, bins)
3538
3540
3539 def dohash(text):
3541 def dohash(text):
3540 if not cache:
3542 if not cache:
3541 r.clearcaches()
3543 r.clearcaches()
3542 r.checkhash(text, node, rev=rev)
3544 r.checkhash(text, node, rev=rev)
3543
3545
3544 def dorevision():
3546 def dorevision():
3545 if not cache:
3547 if not cache:
3546 r.clearcaches()
3548 r.clearcaches()
3547 r.revision(node)
3549 r.revision(node)
3548
3550
3549 try:
3551 try:
3550 from mercurial.revlogutils.deltas import slicechunk
3552 from mercurial.revlogutils.deltas import slicechunk
3551 except ImportError:
3553 except ImportError:
3552 slicechunk = getattr(revlog, '_slicechunk', None)
3554 slicechunk = getattr(revlog, '_slicechunk', None)
3553
3555
3554 size = r.length(rev)
3556 size = r.length(rev)
3555 chain = r._deltachain(rev)[0]
3557 chain = r._deltachain(rev)[0]
3556 if not getattr(r, '_withsparseread', False):
3558 if not getattr(r, '_withsparseread', False):
3557 slicedchain = (chain,)
3559 slicedchain = (chain,)
3558 else:
3560 else:
3559 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3561 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3560 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3562 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3561 rawchunks = getrawchunks(data, slicedchain)
3563 rawchunks = getrawchunks(data, slicedchain)
3562 bins = r._chunks(chain)
3564 bins = r._chunks(chain)
3563 text = bytes(bins[0])
3565 text = bytes(bins[0])
3564 bins = bins[1:]
3566 bins = bins[1:]
3565 text = mdiff.patches(text, bins)
3567 text = mdiff.patches(text, bins)
3566
3568
3567 benches = [
3569 benches = [
3568 (lambda: dorevision(), b'full'),
3570 (lambda: dorevision(), b'full'),
3569 (lambda: dodeltachain(rev), b'deltachain'),
3571 (lambda: dodeltachain(rev), b'deltachain'),
3570 (lambda: doread(chain), b'read'),
3572 (lambda: doread(chain), b'read'),
3571 ]
3573 ]
3572
3574
3573 if getattr(r, '_withsparseread', False):
3575 if getattr(r, '_withsparseread', False):
3574 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3576 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3575 benches.append(slicing)
3577 benches.append(slicing)
3576
3578
3577 benches.extend(
3579 benches.extend(
3578 [
3580 [
3579 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3581 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3580 (lambda: dodecompress(rawchunks), b'decompress'),
3582 (lambda: dodecompress(rawchunks), b'decompress'),
3581 (lambda: dopatch(text, bins), b'patch'),
3583 (lambda: dopatch(text, bins), b'patch'),
3582 (lambda: dohash(text), b'hash'),
3584 (lambda: dohash(text), b'hash'),
3583 ]
3585 ]
3584 )
3586 )
3585
3587
3586 timer, fm = gettimer(ui, opts)
3588 timer, fm = gettimer(ui, opts)
3587 for fn, title in benches:
3589 for fn, title in benches:
3588 timer(fn, title=title)
3590 timer(fn, title=title)
3589 fm.end()
3591 fm.end()
3590
3592
3591
3593
3592 @command(
3594 @command(
3593 b'perf::revset|perfrevset',
3595 b'perf::revset|perfrevset',
3594 [
3596 [
3595 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3597 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3596 (b'', b'contexts', False, b'obtain changectx for each revision'),
3598 (b'', b'contexts', False, b'obtain changectx for each revision'),
3597 ]
3599 ]
3598 + formatteropts,
3600 + formatteropts,
3599 b"REVSET",
3601 b"REVSET",
3600 )
3602 )
3601 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3603 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3602 """benchmark the execution time of a revset
3604 """benchmark the execution time of a revset
3603
3605
3604 Use the --clean option if need to evaluate the impact of build volatile
3606 Use the --clean option if need to evaluate the impact of build volatile
3605 revisions set cache on the revset execution. Volatile cache hold filtered
3607 revisions set cache on the revset execution. Volatile cache hold filtered
3606 and obsolete related cache."""
3608 and obsolete related cache."""
3607 opts = _byteskwargs(opts)
3609 opts = _byteskwargs(opts)
3608
3610
3609 timer, fm = gettimer(ui, opts)
3611 timer, fm = gettimer(ui, opts)
3610
3612
3611 def d():
3613 def d():
3612 if clear:
3614 if clear:
3613 repo.invalidatevolatilesets()
3615 repo.invalidatevolatilesets()
3614 if contexts:
3616 if contexts:
3615 for ctx in repo.set(expr):
3617 for ctx in repo.set(expr):
3616 pass
3618 pass
3617 else:
3619 else:
3618 for r in repo.revs(expr):
3620 for r in repo.revs(expr):
3619 pass
3621 pass
3620
3622
3621 timer(d)
3623 timer(d)
3622 fm.end()
3624 fm.end()
3623
3625
3624
3626
3625 @command(
3627 @command(
3626 b'perf::volatilesets|perfvolatilesets',
3628 b'perf::volatilesets|perfvolatilesets',
3627 [
3629 [
3628 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3630 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3629 ]
3631 ]
3630 + formatteropts,
3632 + formatteropts,
3631 )
3633 )
3632 def perfvolatilesets(ui, repo, *names, **opts):
3634 def perfvolatilesets(ui, repo, *names, **opts):
3633 """benchmark the computation of various volatile set
3635 """benchmark the computation of various volatile set
3634
3636
3635 Volatile set computes element related to filtering and obsolescence."""
3637 Volatile set computes element related to filtering and obsolescence."""
3636 opts = _byteskwargs(opts)
3638 opts = _byteskwargs(opts)
3637 timer, fm = gettimer(ui, opts)
3639 timer, fm = gettimer(ui, opts)
3638 repo = repo.unfiltered()
3640 repo = repo.unfiltered()
3639
3641
3640 def getobs(name):
3642 def getobs(name):
3641 def d():
3643 def d():
3642 repo.invalidatevolatilesets()
3644 repo.invalidatevolatilesets()
3643 if opts[b'clear_obsstore']:
3645 if opts[b'clear_obsstore']:
3644 clearfilecache(repo, b'obsstore')
3646 clearfilecache(repo, b'obsstore')
3645 obsolete.getrevs(repo, name)
3647 obsolete.getrevs(repo, name)
3646
3648
3647 return d
3649 return d
3648
3650
3649 allobs = sorted(obsolete.cachefuncs)
3651 allobs = sorted(obsolete.cachefuncs)
3650 if names:
3652 if names:
3651 allobs = [n for n in allobs if n in names]
3653 allobs = [n for n in allobs if n in names]
3652
3654
3653 for name in allobs:
3655 for name in allobs:
3654 timer(getobs(name), title=name)
3656 timer(getobs(name), title=name)
3655
3657
3656 def getfiltered(name):
3658 def getfiltered(name):
3657 def d():
3659 def d():
3658 repo.invalidatevolatilesets()
3660 repo.invalidatevolatilesets()
3659 if opts[b'clear_obsstore']:
3661 if opts[b'clear_obsstore']:
3660 clearfilecache(repo, b'obsstore')
3662 clearfilecache(repo, b'obsstore')
3661 repoview.filterrevs(repo, name)
3663 repoview.filterrevs(repo, name)
3662
3664
3663 return d
3665 return d
3664
3666
3665 allfilter = sorted(repoview.filtertable)
3667 allfilter = sorted(repoview.filtertable)
3666 if names:
3668 if names:
3667 allfilter = [n for n in allfilter if n in names]
3669 allfilter = [n for n in allfilter if n in names]
3668
3670
3669 for name in allfilter:
3671 for name in allfilter:
3670 timer(getfiltered(name), title=name)
3672 timer(getfiltered(name), title=name)
3671 fm.end()
3673 fm.end()
3672
3674
3673
3675
3674 @command(
3676 @command(
3675 b'perf::branchmap|perfbranchmap',
3677 b'perf::branchmap|perfbranchmap',
3676 [
3678 [
3677 (b'f', b'full', False, b'Includes build time of subset'),
3679 (b'f', b'full', False, b'Includes build time of subset'),
3678 (
3680 (
3679 b'',
3681 b'',
3680 b'clear-revbranch',
3682 b'clear-revbranch',
3681 False,
3683 False,
3682 b'purge the revbranch cache between computation',
3684 b'purge the revbranch cache between computation',
3683 ),
3685 ),
3684 ]
3686 ]
3685 + formatteropts,
3687 + formatteropts,
3686 )
3688 )
3687 def perfbranchmap(ui, repo, *filternames, **opts):
3689 def perfbranchmap(ui, repo, *filternames, **opts):
3688 """benchmark the update of a branchmap
3690 """benchmark the update of a branchmap
3689
3691
3690 This benchmarks the full repo.branchmap() call with read and write disabled
3692 This benchmarks the full repo.branchmap() call with read and write disabled
3691 """
3693 """
3692 opts = _byteskwargs(opts)
3694 opts = _byteskwargs(opts)
3693 full = opts.get(b"full", False)
3695 full = opts.get(b"full", False)
3694 clear_revbranch = opts.get(b"clear_revbranch", False)
3696 clear_revbranch = opts.get(b"clear_revbranch", False)
3695 timer, fm = gettimer(ui, opts)
3697 timer, fm = gettimer(ui, opts)
3696
3698
3697 def getbranchmap(filtername):
3699 def getbranchmap(filtername):
3698 """generate a benchmark function for the filtername"""
3700 """generate a benchmark function for the filtername"""
3699 if filtername is None:
3701 if filtername is None:
3700 view = repo
3702 view = repo
3701 else:
3703 else:
3702 view = repo.filtered(filtername)
3704 view = repo.filtered(filtername)
3703 if util.safehasattr(view._branchcaches, '_per_filter'):
3705 if util.safehasattr(view._branchcaches, '_per_filter'):
3704 filtered = view._branchcaches._per_filter
3706 filtered = view._branchcaches._per_filter
3705 else:
3707 else:
3706 # older versions
3708 # older versions
3707 filtered = view._branchcaches
3709 filtered = view._branchcaches
3708
3710
3709 def d():
3711 def d():
3710 if clear_revbranch:
3712 if clear_revbranch:
3711 repo.revbranchcache()._clear()
3713 repo.revbranchcache()._clear()
3712 if full:
3714 if full:
3713 view._branchcaches.clear()
3715 view._branchcaches.clear()
3714 else:
3716 else:
3715 filtered.pop(filtername, None)
3717 filtered.pop(filtername, None)
3716 view.branchmap()
3718 view.branchmap()
3717
3719
3718 return d
3720 return d
3719
3721
3720 # add filter in smaller subset to bigger subset
3722 # add filter in smaller subset to bigger subset
3721 possiblefilters = set(repoview.filtertable)
3723 possiblefilters = set(repoview.filtertable)
3722 if filternames:
3724 if filternames:
3723 possiblefilters &= set(filternames)
3725 possiblefilters &= set(filternames)
3724 subsettable = getbranchmapsubsettable()
3726 subsettable = getbranchmapsubsettable()
3725 allfilters = []
3727 allfilters = []
3726 while possiblefilters:
3728 while possiblefilters:
3727 for name in possiblefilters:
3729 for name in possiblefilters:
3728 subset = subsettable.get(name)
3730 subset = subsettable.get(name)
3729 if subset not in possiblefilters:
3731 if subset not in possiblefilters:
3730 break
3732 break
3731 else:
3733 else:
3732 assert False, b'subset cycle %s!' % possiblefilters
3734 assert False, b'subset cycle %s!' % possiblefilters
3733 allfilters.append(name)
3735 allfilters.append(name)
3734 possiblefilters.remove(name)
3736 possiblefilters.remove(name)
3735
3737
3736 # warm the cache
3738 # warm the cache
3737 if not full:
3739 if not full:
3738 for name in allfilters:
3740 for name in allfilters:
3739 repo.filtered(name).branchmap()
3741 repo.filtered(name).branchmap()
3740 if not filternames or b'unfiltered' in filternames:
3742 if not filternames or b'unfiltered' in filternames:
3741 # add unfiltered
3743 # add unfiltered
3742 allfilters.append(None)
3744 allfilters.append(None)
3743
3745
3744 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3746 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3745 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3747 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3746 branchcacheread.set(classmethod(lambda *args: None))
3748 branchcacheread.set(classmethod(lambda *args: None))
3747 else:
3749 else:
3748 # older versions
3750 # older versions
3749 branchcacheread = safeattrsetter(branchmap, b'read')
3751 branchcacheread = safeattrsetter(branchmap, b'read')
3750 branchcacheread.set(lambda *args: None)
3752 branchcacheread.set(lambda *args: None)
3751 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3753 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3752 branchcachewrite.set(lambda *args: None)
3754 branchcachewrite.set(lambda *args: None)
3753 try:
3755 try:
3754 for name in allfilters:
3756 for name in allfilters:
3755 printname = name
3757 printname = name
3756 if name is None:
3758 if name is None:
3757 printname = b'unfiltered'
3759 printname = b'unfiltered'
3758 timer(getbranchmap(name), title=printname)
3760 timer(getbranchmap(name), title=printname)
3759 finally:
3761 finally:
3760 branchcacheread.restore()
3762 branchcacheread.restore()
3761 branchcachewrite.restore()
3763 branchcachewrite.restore()
3762 fm.end()
3764 fm.end()
3763
3765
3764
3766
3765 @command(
3767 @command(
3766 b'perf::branchmapupdate|perfbranchmapupdate',
3768 b'perf::branchmapupdate|perfbranchmapupdate',
3767 [
3769 [
3768 (b'', b'base', [], b'subset of revision to start from'),
3770 (b'', b'base', [], b'subset of revision to start from'),
3769 (b'', b'target', [], b'subset of revision to end with'),
3771 (b'', b'target', [], b'subset of revision to end with'),
3770 (b'', b'clear-caches', False, b'clear cache between each runs'),
3772 (b'', b'clear-caches', False, b'clear cache between each runs'),
3771 ]
3773 ]
3772 + formatteropts,
3774 + formatteropts,
3773 )
3775 )
3774 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3776 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3775 """benchmark branchmap update from for <base> revs to <target> revs
3777 """benchmark branchmap update from for <base> revs to <target> revs
3776
3778
3777 If `--clear-caches` is passed, the following items will be reset before
3779 If `--clear-caches` is passed, the following items will be reset before
3778 each update:
3780 each update:
3779 * the changelog instance and associated indexes
3781 * the changelog instance and associated indexes
3780 * the rev-branch-cache instance
3782 * the rev-branch-cache instance
3781
3783
3782 Examples:
3784 Examples:
3783
3785
3784 # update for the one last revision
3786 # update for the one last revision
3785 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3787 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3786
3788
3787 $ update for change coming with a new branch
3789 $ update for change coming with a new branch
3788 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3790 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3789 """
3791 """
3790 from mercurial import branchmap
3792 from mercurial import branchmap
3791 from mercurial import repoview
3793 from mercurial import repoview
3792
3794
3793 opts = _byteskwargs(opts)
3795 opts = _byteskwargs(opts)
3794 timer, fm = gettimer(ui, opts)
3796 timer, fm = gettimer(ui, opts)
3795 clearcaches = opts[b'clear_caches']
3797 clearcaches = opts[b'clear_caches']
3796 unfi = repo.unfiltered()
3798 unfi = repo.unfiltered()
3797 x = [None] # used to pass data between closure
3799 x = [None] # used to pass data between closure
3798
3800
3799 # we use a `list` here to avoid possible side effect from smartset
3801 # we use a `list` here to avoid possible side effect from smartset
3800 baserevs = list(scmutil.revrange(repo, base))
3802 baserevs = list(scmutil.revrange(repo, base))
3801 targetrevs = list(scmutil.revrange(repo, target))
3803 targetrevs = list(scmutil.revrange(repo, target))
3802 if not baserevs:
3804 if not baserevs:
3803 raise error.Abort(b'no revisions selected for --base')
3805 raise error.Abort(b'no revisions selected for --base')
3804 if not targetrevs:
3806 if not targetrevs:
3805 raise error.Abort(b'no revisions selected for --target')
3807 raise error.Abort(b'no revisions selected for --target')
3806
3808
3807 # make sure the target branchmap also contains the one in the base
3809 # make sure the target branchmap also contains the one in the base
3808 targetrevs = list(set(baserevs) | set(targetrevs))
3810 targetrevs = list(set(baserevs) | set(targetrevs))
3809 targetrevs.sort()
3811 targetrevs.sort()
3810
3812
3811 cl = repo.changelog
3813 cl = repo.changelog
3812 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3814 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3813 allbaserevs.sort()
3815 allbaserevs.sort()
3814 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3816 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3815
3817
3816 newrevs = list(alltargetrevs.difference(allbaserevs))
3818 newrevs = list(alltargetrevs.difference(allbaserevs))
3817 newrevs.sort()
3819 newrevs.sort()
3818
3820
3819 allrevs = frozenset(unfi.changelog.revs())
3821 allrevs = frozenset(unfi.changelog.revs())
3820 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3822 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3821 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3823 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3822
3824
3823 def basefilter(repo, visibilityexceptions=None):
3825 def basefilter(repo, visibilityexceptions=None):
3824 return basefilterrevs
3826 return basefilterrevs
3825
3827
3826 def targetfilter(repo, visibilityexceptions=None):
3828 def targetfilter(repo, visibilityexceptions=None):
3827 return targetfilterrevs
3829 return targetfilterrevs
3828
3830
3829 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3831 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3830 ui.status(msg % (len(allbaserevs), len(newrevs)))
3832 ui.status(msg % (len(allbaserevs), len(newrevs)))
3831 if targetfilterrevs:
3833 if targetfilterrevs:
3832 msg = b'(%d revisions still filtered)\n'
3834 msg = b'(%d revisions still filtered)\n'
3833 ui.status(msg % len(targetfilterrevs))
3835 ui.status(msg % len(targetfilterrevs))
3834
3836
3835 try:
3837 try:
3836 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3838 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3837 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3839 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3838
3840
3839 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3841 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3840 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3842 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3841
3843
3842 # try to find an existing branchmap to reuse
3844 # try to find an existing branchmap to reuse
3843 subsettable = getbranchmapsubsettable()
3845 subsettable = getbranchmapsubsettable()
3844 candidatefilter = subsettable.get(None)
3846 candidatefilter = subsettable.get(None)
3845 while candidatefilter is not None:
3847 while candidatefilter is not None:
3846 candidatebm = repo.filtered(candidatefilter).branchmap()
3848 candidatebm = repo.filtered(candidatefilter).branchmap()
3847 if candidatebm.validfor(baserepo):
3849 if candidatebm.validfor(baserepo):
3848 filtered = repoview.filterrevs(repo, candidatefilter)
3850 filtered = repoview.filterrevs(repo, candidatefilter)
3849 missing = [r for r in allbaserevs if r in filtered]
3851 missing = [r for r in allbaserevs if r in filtered]
3850 base = candidatebm.copy()
3852 base = candidatebm.copy()
3851 base.update(baserepo, missing)
3853 base.update(baserepo, missing)
3852 break
3854 break
3853 candidatefilter = subsettable.get(candidatefilter)
3855 candidatefilter = subsettable.get(candidatefilter)
3854 else:
3856 else:
3855 # no suitable subset where found
3857 # no suitable subset where found
3856 base = branchmap.branchcache()
3858 base = branchmap.branchcache()
3857 base.update(baserepo, allbaserevs)
3859 base.update(baserepo, allbaserevs)
3858
3860
3859 def setup():
3861 def setup():
3860 x[0] = base.copy()
3862 x[0] = base.copy()
3861 if clearcaches:
3863 if clearcaches:
3862 unfi._revbranchcache = None
3864 unfi._revbranchcache = None
3863 clearchangelog(repo)
3865 clearchangelog(repo)
3864
3866
3865 def bench():
3867 def bench():
3866 x[0].update(targetrepo, newrevs)
3868 x[0].update(targetrepo, newrevs)
3867
3869
3868 timer(bench, setup=setup)
3870 timer(bench, setup=setup)
3869 fm.end()
3871 fm.end()
3870 finally:
3872 finally:
3871 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3873 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3872 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3874 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3873
3875
3874
3876
3875 @command(
3877 @command(
3876 b'perf::branchmapload|perfbranchmapload',
3878 b'perf::branchmapload|perfbranchmapload',
3877 [
3879 [
3878 (b'f', b'filter', b'', b'Specify repoview filter'),
3880 (b'f', b'filter', b'', b'Specify repoview filter'),
3879 (b'', b'list', False, b'List brachmap filter caches'),
3881 (b'', b'list', False, b'List brachmap filter caches'),
3880 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3882 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3881 ]
3883 ]
3882 + formatteropts,
3884 + formatteropts,
3883 )
3885 )
3884 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3886 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3885 """benchmark reading the branchmap"""
3887 """benchmark reading the branchmap"""
3886 opts = _byteskwargs(opts)
3888 opts = _byteskwargs(opts)
3887 clearrevlogs = opts[b'clear_revlogs']
3889 clearrevlogs = opts[b'clear_revlogs']
3888
3890
3889 if list:
3891 if list:
3890 for name, kind, st in repo.cachevfs.readdir(stat=True):
3892 for name, kind, st in repo.cachevfs.readdir(stat=True):
3891 if name.startswith(b'branch2'):
3893 if name.startswith(b'branch2'):
3892 filtername = name.partition(b'-')[2] or b'unfiltered'
3894 filtername = name.partition(b'-')[2] or b'unfiltered'
3893 ui.status(
3895 ui.status(
3894 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3896 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3895 )
3897 )
3896 return
3898 return
3897 if not filter:
3899 if not filter:
3898 filter = None
3900 filter = None
3899 subsettable = getbranchmapsubsettable()
3901 subsettable = getbranchmapsubsettable()
3900 if filter is None:
3902 if filter is None:
3901 repo = repo.unfiltered()
3903 repo = repo.unfiltered()
3902 else:
3904 else:
3903 repo = repoview.repoview(repo, filter)
3905 repo = repoview.repoview(repo, filter)
3904
3906
3905 repo.branchmap() # make sure we have a relevant, up to date branchmap
3907 repo.branchmap() # make sure we have a relevant, up to date branchmap
3906
3908
3907 try:
3909 try:
3908 fromfile = branchmap.branchcache.fromfile
3910 fromfile = branchmap.branchcache.fromfile
3909 except AttributeError:
3911 except AttributeError:
3910 # older versions
3912 # older versions
3911 fromfile = branchmap.read
3913 fromfile = branchmap.read
3912
3914
3913 currentfilter = filter
3915 currentfilter = filter
3914 # try once without timer, the filter may not be cached
3916 # try once without timer, the filter may not be cached
3915 while fromfile(repo) is None:
3917 while fromfile(repo) is None:
3916 currentfilter = subsettable.get(currentfilter)
3918 currentfilter = subsettable.get(currentfilter)
3917 if currentfilter is None:
3919 if currentfilter is None:
3918 raise error.Abort(
3920 raise error.Abort(
3919 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3921 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3920 )
3922 )
3921 repo = repo.filtered(currentfilter)
3923 repo = repo.filtered(currentfilter)
3922 timer, fm = gettimer(ui, opts)
3924 timer, fm = gettimer(ui, opts)
3923
3925
3924 def setup():
3926 def setup():
3925 if clearrevlogs:
3927 if clearrevlogs:
3926 clearchangelog(repo)
3928 clearchangelog(repo)
3927
3929
3928 def bench():
3930 def bench():
3929 fromfile(repo)
3931 fromfile(repo)
3930
3932
3931 timer(bench, setup=setup)
3933 timer(bench, setup=setup)
3932 fm.end()
3934 fm.end()
3933
3935
3934
3936
3935 @command(b'perf::loadmarkers|perfloadmarkers')
3937 @command(b'perf::loadmarkers|perfloadmarkers')
3936 def perfloadmarkers(ui, repo):
3938 def perfloadmarkers(ui, repo):
3937 """benchmark the time to parse the on-disk markers for a repo
3939 """benchmark the time to parse the on-disk markers for a repo
3938
3940
3939 Result is the number of markers in the repo."""
3941 Result is the number of markers in the repo."""
3940 timer, fm = gettimer(ui)
3942 timer, fm = gettimer(ui)
3941 svfs = getsvfs(repo)
3943 svfs = getsvfs(repo)
3942 timer(lambda: len(obsolete.obsstore(repo, svfs)))
3944 timer(lambda: len(obsolete.obsstore(repo, svfs)))
3943 fm.end()
3945 fm.end()
3944
3946
3945
3947
3946 @command(
3948 @command(
3947 b'perf::lrucachedict|perflrucachedict',
3949 b'perf::lrucachedict|perflrucachedict',
3948 formatteropts
3950 formatteropts
3949 + [
3951 + [
3950 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3952 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3951 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3953 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3952 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3954 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3953 (b'', b'size', 4, b'size of cache'),
3955 (b'', b'size', 4, b'size of cache'),
3954 (b'', b'gets', 10000, b'number of key lookups'),
3956 (b'', b'gets', 10000, b'number of key lookups'),
3955 (b'', b'sets', 10000, b'number of key sets'),
3957 (b'', b'sets', 10000, b'number of key sets'),
3956 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3958 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3957 (
3959 (
3958 b'',
3960 b'',
3959 b'mixedgetfreq',
3961 b'mixedgetfreq',
3960 50,
3962 50,
3961 b'frequency of get vs set ops in mixed mode',
3963 b'frequency of get vs set ops in mixed mode',
3962 ),
3964 ),
3963 ],
3965 ],
3964 norepo=True,
3966 norepo=True,
3965 )
3967 )
3966 def perflrucache(
3968 def perflrucache(
3967 ui,
3969 ui,
3968 mincost=0,
3970 mincost=0,
3969 maxcost=100,
3971 maxcost=100,
3970 costlimit=0,
3972 costlimit=0,
3971 size=4,
3973 size=4,
3972 gets=10000,
3974 gets=10000,
3973 sets=10000,
3975 sets=10000,
3974 mixed=10000,
3976 mixed=10000,
3975 mixedgetfreq=50,
3977 mixedgetfreq=50,
3976 **opts
3978 **opts
3977 ):
3979 ):
3978 opts = _byteskwargs(opts)
3980 opts = _byteskwargs(opts)
3979
3981
3980 def doinit():
3982 def doinit():
3981 for i in _xrange(10000):
3983 for i in _xrange(10000):
3982 util.lrucachedict(size)
3984 util.lrucachedict(size)
3983
3985
3984 costrange = list(range(mincost, maxcost + 1))
3986 costrange = list(range(mincost, maxcost + 1))
3985
3987
3986 values = []
3988 values = []
3987 for i in _xrange(size):
3989 for i in _xrange(size):
3988 values.append(random.randint(0, _maxint))
3990 values.append(random.randint(0, _maxint))
3989
3991
3990 # Get mode fills the cache and tests raw lookup performance with no
3992 # Get mode fills the cache and tests raw lookup performance with no
3991 # eviction.
3993 # eviction.
3992 getseq = []
3994 getseq = []
3993 for i in _xrange(gets):
3995 for i in _xrange(gets):
3994 getseq.append(random.choice(values))
3996 getseq.append(random.choice(values))
3995
3997
3996 def dogets():
3998 def dogets():
3997 d = util.lrucachedict(size)
3999 d = util.lrucachedict(size)
3998 for v in values:
4000 for v in values:
3999 d[v] = v
4001 d[v] = v
4000 for key in getseq:
4002 for key in getseq:
4001 value = d[key]
4003 value = d[key]
4002 value # silence pyflakes warning
4004 value # silence pyflakes warning
4003
4005
4004 def dogetscost():
4006 def dogetscost():
4005 d = util.lrucachedict(size, maxcost=costlimit)
4007 d = util.lrucachedict(size, maxcost=costlimit)
4006 for i, v in enumerate(values):
4008 for i, v in enumerate(values):
4007 d.insert(v, v, cost=costs[i])
4009 d.insert(v, v, cost=costs[i])
4008 for key in getseq:
4010 for key in getseq:
4009 try:
4011 try:
4010 value = d[key]
4012 value = d[key]
4011 value # silence pyflakes warning
4013 value # silence pyflakes warning
4012 except KeyError:
4014 except KeyError:
4013 pass
4015 pass
4014
4016
4015 # Set mode tests insertion speed with cache eviction.
4017 # Set mode tests insertion speed with cache eviction.
4016 setseq = []
4018 setseq = []
4017 costs = []
4019 costs = []
4018 for i in _xrange(sets):
4020 for i in _xrange(sets):
4019 setseq.append(random.randint(0, _maxint))
4021 setseq.append(random.randint(0, _maxint))
4020 costs.append(random.choice(costrange))
4022 costs.append(random.choice(costrange))
4021
4023
4022 def doinserts():
4024 def doinserts():
4023 d = util.lrucachedict(size)
4025 d = util.lrucachedict(size)
4024 for v in setseq:
4026 for v in setseq:
4025 d.insert(v, v)
4027 d.insert(v, v)
4026
4028
4027 def doinsertscost():
4029 def doinsertscost():
4028 d = util.lrucachedict(size, maxcost=costlimit)
4030 d = util.lrucachedict(size, maxcost=costlimit)
4029 for i, v in enumerate(setseq):
4031 for i, v in enumerate(setseq):
4030 d.insert(v, v, cost=costs[i])
4032 d.insert(v, v, cost=costs[i])
4031
4033
4032 def dosets():
4034 def dosets():
4033 d = util.lrucachedict(size)
4035 d = util.lrucachedict(size)
4034 for v in setseq:
4036 for v in setseq:
4035 d[v] = v
4037 d[v] = v
4036
4038
4037 # Mixed mode randomly performs gets and sets with eviction.
4039 # Mixed mode randomly performs gets and sets with eviction.
4038 mixedops = []
4040 mixedops = []
4039 for i in _xrange(mixed):
4041 for i in _xrange(mixed):
4040 r = random.randint(0, 100)
4042 r = random.randint(0, 100)
4041 if r < mixedgetfreq:
4043 if r < mixedgetfreq:
4042 op = 0
4044 op = 0
4043 else:
4045 else:
4044 op = 1
4046 op = 1
4045
4047
4046 mixedops.append(
4048 mixedops.append(
4047 (op, random.randint(0, size * 2), random.choice(costrange))
4049 (op, random.randint(0, size * 2), random.choice(costrange))
4048 )
4050 )
4049
4051
4050 def domixed():
4052 def domixed():
4051 d = util.lrucachedict(size)
4053 d = util.lrucachedict(size)
4052
4054
4053 for op, v, cost in mixedops:
4055 for op, v, cost in mixedops:
4054 if op == 0:
4056 if op == 0:
4055 try:
4057 try:
4056 d[v]
4058 d[v]
4057 except KeyError:
4059 except KeyError:
4058 pass
4060 pass
4059 else:
4061 else:
4060 d[v] = v
4062 d[v] = v
4061
4063
4062 def domixedcost():
4064 def domixedcost():
4063 d = util.lrucachedict(size, maxcost=costlimit)
4065 d = util.lrucachedict(size, maxcost=costlimit)
4064
4066
4065 for op, v, cost in mixedops:
4067 for op, v, cost in mixedops:
4066 if op == 0:
4068 if op == 0:
4067 try:
4069 try:
4068 d[v]
4070 d[v]
4069 except KeyError:
4071 except KeyError:
4070 pass
4072 pass
4071 else:
4073 else:
4072 d.insert(v, v, cost=cost)
4074 d.insert(v, v, cost=cost)
4073
4075
4074 benches = [
4076 benches = [
4075 (doinit, b'init'),
4077 (doinit, b'init'),
4076 ]
4078 ]
4077
4079
4078 if costlimit:
4080 if costlimit:
4079 benches.extend(
4081 benches.extend(
4080 [
4082 [
4081 (dogetscost, b'gets w/ cost limit'),
4083 (dogetscost, b'gets w/ cost limit'),
4082 (doinsertscost, b'inserts w/ cost limit'),
4084 (doinsertscost, b'inserts w/ cost limit'),
4083 (domixedcost, b'mixed w/ cost limit'),
4085 (domixedcost, b'mixed w/ cost limit'),
4084 ]
4086 ]
4085 )
4087 )
4086 else:
4088 else:
4087 benches.extend(
4089 benches.extend(
4088 [
4090 [
4089 (dogets, b'gets'),
4091 (dogets, b'gets'),
4090 (doinserts, b'inserts'),
4092 (doinserts, b'inserts'),
4091 (dosets, b'sets'),
4093 (dosets, b'sets'),
4092 (domixed, b'mixed'),
4094 (domixed, b'mixed'),
4093 ]
4095 ]
4094 )
4096 )
4095
4097
4096 for fn, title in benches:
4098 for fn, title in benches:
4097 timer, fm = gettimer(ui, opts)
4099 timer, fm = gettimer(ui, opts)
4098 timer(fn, title=title)
4100 timer(fn, title=title)
4099 fm.end()
4101 fm.end()
4100
4102
4101
4103
4102 @command(
4104 @command(
4103 b'perf::write|perfwrite',
4105 b'perf::write|perfwrite',
4104 formatteropts
4106 formatteropts
4105 + [
4107 + [
4106 (b'', b'write-method', b'write', b'ui write method'),
4108 (b'', b'write-method', b'write', b'ui write method'),
4107 (b'', b'nlines', 100, b'number of lines'),
4109 (b'', b'nlines', 100, b'number of lines'),
4108 (b'', b'nitems', 100, b'number of items (per line)'),
4110 (b'', b'nitems', 100, b'number of items (per line)'),
4109 (b'', b'item', b'x', b'item that is written'),
4111 (b'', b'item', b'x', b'item that is written'),
4110 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4112 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4111 (b'', b'flush-line', None, b'flush after each line'),
4113 (b'', b'flush-line', None, b'flush after each line'),
4112 ],
4114 ],
4113 )
4115 )
4114 def perfwrite(ui, repo, **opts):
4116 def perfwrite(ui, repo, **opts):
4115 """microbenchmark ui.write (and others)"""
4117 """microbenchmark ui.write (and others)"""
4116 opts = _byteskwargs(opts)
4118 opts = _byteskwargs(opts)
4117
4119
4118 write = getattr(ui, _sysstr(opts[b'write_method']))
4120 write = getattr(ui, _sysstr(opts[b'write_method']))
4119 nlines = int(opts[b'nlines'])
4121 nlines = int(opts[b'nlines'])
4120 nitems = int(opts[b'nitems'])
4122 nitems = int(opts[b'nitems'])
4121 item = opts[b'item']
4123 item = opts[b'item']
4122 batch_line = opts.get(b'batch_line')
4124 batch_line = opts.get(b'batch_line')
4123 flush_line = opts.get(b'flush_line')
4125 flush_line = opts.get(b'flush_line')
4124
4126
4125 if batch_line:
4127 if batch_line:
4126 line = item * nitems + b'\n'
4128 line = item * nitems + b'\n'
4127
4129
4128 def benchmark():
4130 def benchmark():
4129 for i in pycompat.xrange(nlines):
4131 for i in pycompat.xrange(nlines):
4130 if batch_line:
4132 if batch_line:
4131 write(line)
4133 write(line)
4132 else:
4134 else:
4133 for i in pycompat.xrange(nitems):
4135 for i in pycompat.xrange(nitems):
4134 write(item)
4136 write(item)
4135 write(b'\n')
4137 write(b'\n')
4136 if flush_line:
4138 if flush_line:
4137 ui.flush()
4139 ui.flush()
4138 ui.flush()
4140 ui.flush()
4139
4141
4140 timer, fm = gettimer(ui, opts)
4142 timer, fm = gettimer(ui, opts)
4141 timer(benchmark)
4143 timer(benchmark)
4142 fm.end()
4144 fm.end()
4143
4145
4144
4146
4145 def uisetup(ui):
4147 def uisetup(ui):
4146 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4148 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4147 commands, b'debugrevlogopts'
4149 commands, b'debugrevlogopts'
4148 ):
4150 ):
4149 # for "historical portability":
4151 # for "historical portability":
4150 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4152 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4151 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4153 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4152 # openrevlog() should cause failure, because it has been
4154 # openrevlog() should cause failure, because it has been
4153 # available since 3.5 (or 49c583ca48c4).
4155 # available since 3.5 (or 49c583ca48c4).
4154 def openrevlog(orig, repo, cmd, file_, opts):
4156 def openrevlog(orig, repo, cmd, file_, opts):
4155 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4157 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4156 raise error.Abort(
4158 raise error.Abort(
4157 b"This version doesn't support --dir option",
4159 b"This version doesn't support --dir option",
4158 hint=b"use 3.5 or later",
4160 hint=b"use 3.5 or later",
4159 )
4161 )
4160 return orig(repo, cmd, file_, opts)
4162 return orig(repo, cmd, file_, opts)
4161
4163
4162 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4164 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4163
4165
4164
4166
4165 @command(
4167 @command(
4166 b'perf::progress|perfprogress',
4168 b'perf::progress|perfprogress',
4167 formatteropts
4169 formatteropts
4168 + [
4170 + [
4169 (b'', b'topic', b'topic', b'topic for progress messages'),
4171 (b'', b'topic', b'topic', b'topic for progress messages'),
4170 (b'c', b'total', 1000000, b'total value we are progressing to'),
4172 (b'c', b'total', 1000000, b'total value we are progressing to'),
4171 ],
4173 ],
4172 norepo=True,
4174 norepo=True,
4173 )
4175 )
4174 def perfprogress(ui, topic=None, total=None, **opts):
4176 def perfprogress(ui, topic=None, total=None, **opts):
4175 """printing of progress bars"""
4177 """printing of progress bars"""
4176 opts = _byteskwargs(opts)
4178 opts = _byteskwargs(opts)
4177
4179
4178 timer, fm = gettimer(ui, opts)
4180 timer, fm = gettimer(ui, opts)
4179
4181
4180 def doprogress():
4182 def doprogress():
4181 with ui.makeprogress(topic, total=total) as progress:
4183 with ui.makeprogress(topic, total=total) as progress:
4182 for i in _xrange(total):
4184 for i in _xrange(total):
4183 progress.increment()
4185 progress.increment()
4184
4186
4185 timer(doprogress)
4187 timer(doprogress)
4186 fm.end()
4188 fm.end()
@@ -1,438 +1,435 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perf::addremove
81 perf::addremove
82 (no help text available)
82 (no help text available)
83 perf::ancestors
83 perf::ancestors
84 (no help text available)
84 (no help text available)
85 perf::ancestorset
85 perf::ancestorset
86 (no help text available)
86 (no help text available)
87 perf::annotate
87 perf::annotate
88 (no help text available)
88 (no help text available)
89 perf::bdiff benchmark a bdiff between revisions
89 perf::bdiff benchmark a bdiff between revisions
90 perf::bookmarks
90 perf::bookmarks
91 benchmark parsing bookmarks from disk to memory
91 benchmark parsing bookmarks from disk to memory
92 perf::branchmap
92 perf::branchmap
93 benchmark the update of a branchmap
93 benchmark the update of a branchmap
94 perf::branchmapload
94 perf::branchmapload
95 benchmark reading the branchmap
95 benchmark reading the branchmap
96 perf::branchmapupdate
96 perf::branchmapupdate
97 benchmark branchmap update from for <base> revs to <target>
97 benchmark branchmap update from for <base> revs to <target>
98 revs
98 revs
99 perf::bundle benchmark the creation of a bundle from a repository
99 perf::bundle benchmark the creation of a bundle from a repository
100 perf::bundleread
100 perf::bundleread
101 Benchmark reading of bundle files.
101 Benchmark reading of bundle files.
102 perf::cca (no help text available)
102 perf::cca (no help text available)
103 perf::changegroupchangelog
103 perf::changegroupchangelog
104 Benchmark producing a changelog group for a changegroup.
104 Benchmark producing a changelog group for a changegroup.
105 perf::changeset
105 perf::changeset
106 (no help text available)
106 (no help text available)
107 perf::ctxfiles
107 perf::ctxfiles
108 (no help text available)
108 (no help text available)
109 perf::delta-find
109 perf::delta-find
110 benchmark the process of finding a valid delta for a revlog
110 benchmark the process of finding a valid delta for a revlog
111 revision
111 revision
112 perf::diffwd Profile diff of working directory changes
112 perf::diffwd Profile diff of working directory changes
113 perf::dirfoldmap
113 perf::dirfoldmap
114 benchmap a 'dirstate._map.dirfoldmap.get()' request
114 benchmap a 'dirstate._map.dirfoldmap.get()' request
115 perf::dirs (no help text available)
115 perf::dirs (no help text available)
116 perf::dirstate
116 perf::dirstate
117 benchmap the time of various distate operations
117 benchmap the time of various distate operations
118 perf::dirstatedirs
118 perf::dirstatedirs
119 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
119 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
120 perf::dirstatefoldmap
120 perf::dirstatefoldmap
121 benchmap a 'dirstate._map.filefoldmap.get()' request
121 benchmap a 'dirstate._map.filefoldmap.get()' request
122 perf::dirstatewrite
122 perf::dirstatewrite
123 benchmap the time it take to write a dirstate on disk
123 benchmap the time it take to write a dirstate on disk
124 perf::discovery
124 perf::discovery
125 benchmark discovery between local repo and the peer at given
125 benchmark discovery between local repo and the peer at given
126 path
126 path
127 perf::fncacheencode
127 perf::fncacheencode
128 (no help text available)
128 (no help text available)
129 perf::fncacheload
129 perf::fncacheload
130 (no help text available)
130 (no help text available)
131 perf::fncachewrite
131 perf::fncachewrite
132 (no help text available)
132 (no help text available)
133 perf::heads benchmark the computation of a changelog heads
133 perf::heads benchmark the computation of a changelog heads
134 perf::helper-mergecopies
134 perf::helper-mergecopies
135 find statistics about potential parameters for
135 find statistics about potential parameters for
136 'perfmergecopies'
136 'perfmergecopies'
137 perf::helper-pathcopies
137 perf::helper-pathcopies
138 find statistic about potential parameters for the
138 find statistic about potential parameters for the
139 'perftracecopies'
139 'perftracecopies'
140 perf::ignore benchmark operation related to computing ignore
140 perf::ignore benchmark operation related to computing ignore
141 perf::index benchmark index creation time followed by a lookup
141 perf::index benchmark index creation time followed by a lookup
142 perf::linelogedits
142 perf::linelogedits
143 (no help text available)
143 (no help text available)
144 perf::loadmarkers
144 perf::loadmarkers
145 benchmark the time to parse the on-disk markers for a repo
145 benchmark the time to parse the on-disk markers for a repo
146 perf::log (no help text available)
146 perf::log (no help text available)
147 perf::lookup (no help text available)
147 perf::lookup (no help text available)
148 perf::lrucachedict
148 perf::lrucachedict
149 (no help text available)
149 (no help text available)
150 perf::manifest
150 perf::manifest
151 benchmark the time to read a manifest from disk and return a
151 benchmark the time to read a manifest from disk and return a
152 usable
152 usable
153 perf::mergecalculate
153 perf::mergecalculate
154 (no help text available)
154 (no help text available)
155 perf::mergecopies
155 perf::mergecopies
156 measure runtime of 'copies.mergecopies'
156 measure runtime of 'copies.mergecopies'
157 perf::moonwalk
157 perf::moonwalk
158 benchmark walking the changelog backwards
158 benchmark walking the changelog backwards
159 perf::nodelookup
159 perf::nodelookup
160 (no help text available)
160 (no help text available)
161 perf::nodemap
161 perf::nodemap
162 benchmark the time necessary to look up revision from a cold
162 benchmark the time necessary to look up revision from a cold
163 nodemap
163 nodemap
164 perf::parents
164 perf::parents
165 benchmark the time necessary to fetch one changeset's parents.
165 benchmark the time necessary to fetch one changeset's parents.
166 perf::pathcopies
166 perf::pathcopies
167 benchmark the copy tracing logic
167 benchmark the copy tracing logic
168 perf::phases benchmark phasesets computation
168 perf::phases benchmark phasesets computation
169 perf::phasesremote
169 perf::phasesremote
170 benchmark time needed to analyse phases of the remote server
170 benchmark time needed to analyse phases of the remote server
171 perf::progress
171 perf::progress
172 printing of progress bars
172 printing of progress bars
173 perf::rawfiles
173 perf::rawfiles
174 (no help text available)
174 (no help text available)
175 perf::revlogchunks
175 perf::revlogchunks
176 Benchmark operations on revlog chunks.
176 Benchmark operations on revlog chunks.
177 perf::revlogindex
177 perf::revlogindex
178 Benchmark operations against a revlog index.
178 Benchmark operations against a revlog index.
179 perf::revlogrevision
179 perf::revlogrevision
180 Benchmark obtaining a revlog revision.
180 Benchmark obtaining a revlog revision.
181 perf::revlogrevisions
181 perf::revlogrevisions
182 Benchmark reading a series of revisions from a revlog.
182 Benchmark reading a series of revisions from a revlog.
183 perf::revlogwrite
183 perf::revlogwrite
184 Benchmark writing a series of revisions to a revlog.
184 Benchmark writing a series of revisions to a revlog.
185 perf::revrange
185 perf::revrange
186 (no help text available)
186 (no help text available)
187 perf::revset benchmark the execution time of a revset
187 perf::revset benchmark the execution time of a revset
188 perf::startup
188 perf::startup
189 (no help text available)
189 (no help text available)
190 perf::status benchmark the performance of a single status call
190 perf::status benchmark the performance of a single status call
191 perf::tags (no help text available)
191 perf::tags (no help text available)
192 perf::templating
192 perf::templating
193 test the rendering time of a given template
193 test the rendering time of a given template
194 perf::unbundle
194 perf::unbundle
195 benchmark application of a bundle in a repository.
195 benchmark application of a bundle in a repository.
196 perf::unidiff
196 perf::unidiff
197 benchmark a unified diff between revisions
197 benchmark a unified diff between revisions
198 perf::volatilesets
198 perf::volatilesets
199 benchmark the computation of various volatile set
199 benchmark the computation of various volatile set
200 perf::walk (no help text available)
200 perf::walk (no help text available)
201 perf::write microbenchmark ui.write (and others)
201 perf::write microbenchmark ui.write (and others)
202
202
203 (use 'hg help -v perf' to show built-in aliases and global options)
203 (use 'hg help -v perf' to show built-in aliases and global options)
204
204
205 $ hg help perfaddremove
205 $ hg help perfaddremove
206 hg perf::addremove
206 hg perf::addremove
207
207
208 aliases: perfaddremove
208 aliases: perfaddremove
209
209
210 (no help text available)
210 (no help text available)
211
211
212 options:
212 options:
213
213
214 -T --template TEMPLATE display with template
214 -T --template TEMPLATE display with template
215
215
216 (some details hidden, use --verbose to show complete help)
216 (some details hidden, use --verbose to show complete help)
217
217
218 $ hg perfaddremove
218 $ hg perfaddremove
219 $ hg perfancestors
219 $ hg perfancestors
220 $ hg perfancestorset 2
220 $ hg perfancestorset 2
221 $ hg perfannotate a
221 $ hg perfannotate a
222 $ hg perfbdiff -c 1
222 $ hg perfbdiff -c 1
223 $ hg perfbdiff --alldata 1
223 $ hg perfbdiff --alldata 1
224 $ hg perfunidiff -c 1
224 $ hg perfunidiff -c 1
225 $ hg perfunidiff --alldata 1
225 $ hg perfunidiff --alldata 1
226 $ hg perfbookmarks
226 $ hg perfbookmarks
227 $ hg perfbranchmap
227 $ hg perfbranchmap
228 $ hg perfbranchmapload
228 $ hg perfbranchmapload
229 $ hg perfbranchmapupdate --base "not tip" --target "tip"
229 $ hg perfbranchmapupdate --base "not tip" --target "tip"
230 benchmark of branchmap with 3 revisions with 1 new ones
230 benchmark of branchmap with 3 revisions with 1 new ones
231 $ hg perfcca
231 $ hg perfcca
232 $ hg perfchangegroupchangelog
232 $ hg perfchangegroupchangelog
233 $ hg perfchangegroupchangelog --cgversion 01
233 $ hg perfchangegroupchangelog --cgversion 01
234 $ hg perfchangeset 2
234 $ hg perfchangeset 2
235 $ hg perfctxfiles 2
235 $ hg perfctxfiles 2
236 $ hg perfdiffwd
236 $ hg perfdiffwd
237 $ hg perfdirfoldmap
237 $ hg perfdirfoldmap
238 $ hg perfdirs
238 $ hg perfdirs
239 $ hg perfdirstate
239 $ hg perfdirstate
240 $ hg perfdirstate --contains
240 $ hg perfdirstate --contains
241 $ hg perfdirstate --iteration
241 $ hg perfdirstate --iteration
242 $ hg perfdirstatedirs
242 $ hg perfdirstatedirs
243 $ hg perfdirstatefoldmap
243 $ hg perfdirstatefoldmap
244 $ hg perfdirstatewrite
244 $ hg perfdirstatewrite
245 #if repofncache
245 #if repofncache
246 $ hg perffncacheencode
246 $ hg perffncacheencode
247 $ hg perffncacheload
247 $ hg perffncacheload
248 $ hg debugrebuildfncache
248 $ hg debugrebuildfncache
249 fncache already up to date
249 fncache already up to date
250 $ hg perffncachewrite
250 $ hg perffncachewrite
251 $ hg debugrebuildfncache
251 $ hg debugrebuildfncache
252 fncache already up to date
252 fncache already up to date
253 #endif
253 #endif
254 $ hg perfheads
254 $ hg perfheads
255 $ hg perfignore
255 $ hg perfignore
256 $ hg perfindex
256 $ hg perfindex
257 $ hg perflinelogedits -n 1
257 $ hg perflinelogedits -n 1
258 $ hg perfloadmarkers
258 $ hg perfloadmarkers
259 $ hg perflog
259 $ hg perflog
260 $ hg perflookup 2
260 $ hg perflookup 2
261 $ hg perflrucache
261 $ hg perflrucache
262 $ hg perfmanifest 2
262 $ hg perfmanifest 2
263 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
263 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
264 $ hg perfmanifest -m 44fe2c8352bb
264 $ hg perfmanifest -m 44fe2c8352bb
265 abort: manifest revision must be integer or full node
265 abort: manifest revision must be integer or full node
266 [255]
266 [255]
267 $ hg perfmergecalculate -r 3
267 $ hg perfmergecalculate -r 3
268 $ hg perfmoonwalk
268 $ hg perfmoonwalk
269 $ hg perfnodelookup 2
269 $ hg perfnodelookup 2
270 $ hg perfpathcopies 1 2
270 $ hg perfpathcopies 1 2
271 $ hg perfprogress --total 1000
271 $ hg perfprogress --total 1000
272 $ hg perfrawfiles 2
272 $ hg perfrawfiles 2
273 $ hg perfrevlogindex -c
273 $ hg perfrevlogindex -c
274 #if reporevlogstore
274 #if reporevlogstore
275 $ hg perfrevlogrevisions .hg/store/data/a.i
275 $ hg perfrevlogrevisions .hg/store/data/a.i
276 #endif
276 #endif
277 $ hg perfrevlogrevision -m 0
277 $ hg perfrevlogrevision -m 0
278 $ hg perfrevlogchunks -c
278 $ hg perfrevlogchunks -c
279 $ hg perfrevrange
279 $ hg perfrevrange
280 $ hg perfrevset 'all()'
280 $ hg perfrevset 'all()'
281 $ hg perfstartup
281 $ hg perfstartup
282 $ hg perfstatus
282 $ hg perfstatus
283 $ hg perfstatus --dirstate
283 $ hg perfstatus --dirstate
284 $ hg perftags
284 $ hg perftags
285 $ hg perftemplating
285 $ hg perftemplating
286 $ hg perfvolatilesets
286 $ hg perfvolatilesets
287 $ hg perfwalk
287 $ hg perfwalk
288 $ hg perfparents
288 $ hg perfparents
289 $ hg perfdiscovery -q .
289 $ hg perfdiscovery -q .
290
290
291 Test run control
291 Test run control
292 ----------------
292 ----------------
293
293
294 Simple single entry
294 Simple single entry
295
295
296 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
296 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
297 ! wall * comb * user * sys * (best of 15) (glob)
297 ! wall * comb * user * sys * (best of 15) (glob)
298
298
299 Multiple entries
299 Multiple entries
300
300
301 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
301 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
302 ! wall * comb * user * sys * (best of 5) (glob)
302 ! wall * comb * user * sys * (best of 5) (glob)
303
303
304 error case are ignored
304 error case are ignored
305
305
306 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
306 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
307 malformatted run limit entry, missing "-": 500
307 malformatted run limit entry, missing "-": 500
308 ! wall * comb * user * sys * (best of 5) (glob)
308 ! wall * comb * user * sys * (best of 5) (glob)
309 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
309 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
310 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
310 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
311 ! wall * comb * user * sys * (best of 5) (glob)
311 ! wall * comb * user * sys * (best of 5) (glob)
312 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
312 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
313 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
313 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
314 ! wall * comb * user * sys * (best of 5) (glob)
314 ! wall * comb * user * sys * (best of 5) (glob)
315
315
316 test actual output
316 test actual output
317 ------------------
317 ------------------
318
318
319 normal output:
319 normal output:
320
320
321 $ hg perfheads --config perf.stub=no
321 $ hg perfheads --config perf.stub=no
322 ! wall * comb * user * sys * (best of *) (glob)
322 ! wall * comb * user * sys * (best of *) (glob)
323
323
324 detailed output:
324 detailed output:
325
325
326 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
326 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
327 ! wall * comb * user * sys * (best of *) (glob)
327 ! wall * comb * user * sys * (best of *) (glob)
328 ! wall * comb * user * sys * (max of *) (glob)
328 ! wall * comb * user * sys * (max of *) (glob)
329 ! wall * comb * user * sys * (avg of *) (glob)
329 ! wall * comb * user * sys * (avg of *) (glob)
330 ! wall * comb * user * sys * (median of *) (glob)
330 ! wall * comb * user * sys * (median of *) (glob)
331
331
332 test json output
332 test json output
333 ----------------
333 ----------------
334
334
335 normal output:
335 normal output:
336
336
337 $ hg perfheads --template json --config perf.stub=no
337 $ hg perfheads --template json --config perf.stub=no
338 [
338 [
339 {
339 {
340 "comb": *, (glob)
340 "comb": *, (glob)
341 "count": *, (glob)
341 "count": *, (glob)
342 "sys": *, (glob)
342 "sys": *, (glob)
343 "user": *, (glob)
343 "user": *, (glob)
344 "wall": * (glob)
344 "wall": * (glob)
345 }
345 }
346 ]
346 ]
347
347
348 detailed output:
348 detailed output:
349
349
350 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
350 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
351 [
351 [
352 {
352 {
353 "avg.comb": *, (glob)
353 "avg.comb": *, (glob)
354 "avg.count": *, (glob)
354 "avg.count": *, (glob)
355 "avg.sys": *, (glob)
355 "avg.sys": *, (glob)
356 "avg.user": *, (glob)
356 "avg.user": *, (glob)
357 "avg.wall": *, (glob)
357 "avg.wall": *, (glob)
358 "comb": *, (glob)
358 "comb": *, (glob)
359 "count": *, (glob)
359 "count": *, (glob)
360 "max.comb": *, (glob)
360 "max.comb": *, (glob)
361 "max.count": *, (glob)
361 "max.count": *, (glob)
362 "max.sys": *, (glob)
362 "max.sys": *, (glob)
363 "max.user": *, (glob)
363 "max.user": *, (glob)
364 "max.wall": *, (glob)
364 "max.wall": *, (glob)
365 "median.comb": *, (glob)
365 "median.comb": *, (glob)
366 "median.count": *, (glob)
366 "median.count": *, (glob)
367 "median.sys": *, (glob)
367 "median.sys": *, (glob)
368 "median.user": *, (glob)
368 "median.user": *, (glob)
369 "median.wall": *, (glob)
369 "median.wall": *, (glob)
370 "sys": *, (glob)
370 "sys": *, (glob)
371 "user": *, (glob)
371 "user": *, (glob)
372 "wall": * (glob)
372 "wall": * (glob)
373 }
373 }
374 ]
374 ]
375
375
376 Test pre-run feature
376 Test pre-run feature
377 --------------------
377 --------------------
378
378
379 (perf discovery has some spurious output)
379 (perf discovery has some spurious output)
380
380
381 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
381 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
382 ! wall * comb * user * sys * (best of 1) (glob)
382 ! wall * comb * user * sys * (best of 1) (glob)
383 searching for changes
383 searching for changes
384 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
384 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
385 ! wall * comb * user * sys * (best of 1) (glob)
385 ! wall * comb * user * sys * (best of 1) (glob)
386 searching for changes
386 searching for changes
387 searching for changes
387 searching for changes
388 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
388 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
389 ! wall * comb * user * sys * (best of 1) (glob)
389 ! wall * comb * user * sys * (best of 1) (glob)
390 searching for changes
390 searching for changes
391 searching for changes
391 searching for changes
392 searching for changes
392 searching for changes
393 searching for changes
393 searching for changes
394 $ hg perf::bundle 'last(all(), 5)'
394 $ hg perf::bundle 'last(all(), 5)'
395 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
395 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
396 4 changesets found
396 4 changesets found
397 $ hg perf::unbundle last-5.hg
397 $ hg perf::unbundle last-5.hg
398 adding changesets
399 adding manifests
400 adding file changes
401
398
402
399
403 test profile-benchmark option
400 test profile-benchmark option
404 ------------------------------
401 ------------------------------
405
402
406 Function to check that statprof ran
403 Function to check that statprof ran
407 $ statprofran () {
404 $ statprofran () {
408 > egrep 'Sample count:|No samples recorded' > /dev/null
405 > egrep 'Sample count:|No samples recorded' > /dev/null
409 > }
406 > }
410 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
407 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
411
408
412 Check perf.py for historical portability
409 Check perf.py for historical portability
413 ----------------------------------------
410 ----------------------------------------
414
411
415 $ cd "$TESTDIR/.."
412 $ cd "$TESTDIR/.."
416
413
417 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
414 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
418 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
415 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
419 > "$TESTDIR"/check-perf-code.py contrib/perf.py
416 > "$TESTDIR"/check-perf-code.py contrib/perf.py
420 contrib/perf.py:\d+: (re)
417 contrib/perf.py:\d+: (re)
421 > from mercurial import (
418 > from mercurial import (
422 import newer module separately in try clause for early Mercurial
419 import newer module separately in try clause for early Mercurial
423 contrib/perf.py:\d+: (re)
420 contrib/perf.py:\d+: (re)
424 > from mercurial import (
421 > from mercurial import (
425 import newer module separately in try clause for early Mercurial
422 import newer module separately in try clause for early Mercurial
426 contrib/perf.py:\d+: (re)
423 contrib/perf.py:\d+: (re)
427 > origindexpath = orig.opener.join(indexfile)
424 > origindexpath = orig.opener.join(indexfile)
428 use getvfs()/getsvfs() for early Mercurial
425 use getvfs()/getsvfs() for early Mercurial
429 contrib/perf.py:\d+: (re)
426 contrib/perf.py:\d+: (re)
430 > origdatapath = orig.opener.join(datafile)
427 > origdatapath = orig.opener.join(datafile)
431 use getvfs()/getsvfs() for early Mercurial
428 use getvfs()/getsvfs() for early Mercurial
432 contrib/perf.py:\d+: (re)
429 contrib/perf.py:\d+: (re)
433 > vfs = vfsmod.vfs(tmpdir)
430 > vfs = vfsmod.vfs(tmpdir)
434 use getvfs()/getsvfs() for early Mercurial
431 use getvfs()/getsvfs() for early Mercurial
435 contrib/perf.py:\d+: (re)
432 contrib/perf.py:\d+: (re)
436 > vfs.options = getattr(orig.opener, 'options', None)
433 > vfs.options = getattr(orig.opener, 'options', None)
437 use getvfs()/getsvfs() for early Mercurial
434 use getvfs()/getsvfs() for early Mercurial
438 [1]
435 [1]
General Comments 0
You need to be logged in to leave comments. Login now