##// END OF EJS Templates
perf-tags: fix the --clear-fnode-cache-rev code...
marmoute -
r52044:2705748b stable
parent child Browse files
Show More
@@ -1,4638 +1,4638 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
# Open a revlog with a signature compatible across Mercurial versions:
# newer revlogs take a (kind, name) tuple as second argument, older
# ones do not.
try:
    from mercurial.revlogutils import constants as revlog_constants

    # tag revlogs created by this extension so they are identifiable
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    # older Mercurial: revlog() takes no revlog-kind argument
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged; no-op fallback used when pycompat helpers are absent."""
    return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel: distinguishes "attribute missing" from any real value


def safehasattr(thing, attr):
    # attr is bytes per this file's convention; _sysstr converts it for getattr
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
197
197
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # Python 3.3+ (and backports): preferred high-resolution clock
    util.timer = time.perf_counter
elif os.name == 'nt':
    # BUG FIX: this compared os.name (a native str) to b'nt', which can
    # never match on Python 3, silently falling through to time.time on
    # Windows.  On Python 2 str and bytes are the same type, so 'nt'
    # still matches there.  (On Python 3 perf_counter always exists, so
    # this branch only ever runs under Python 2, where time.clock is
    # the higher-resolution Windows clock.)
    util.timer = time.clock
else:
    util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238
238
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the list of names in a b"name|alias1|alias2" command spec."""
    return cmd.split(b"|")
244
244
245
245
# Pick a @command decorator matching the running Mercurial:
# registrar.command (3.7+), cmdutil.command (1.9+, wrapped below to add
# the norepo option when missing), or a local fallback for older versions.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # synopsis entry is optional in the command table
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277
277
278
278
# Register this extension's config items with Mercurial when the
# registrar machinery is available; silently skip on versions that
# predate it.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # NOTE(review): presumably configitem() on that version rejects the
    # experimental= keyword, so everything is re-registered without it
    # -- confirm against the referenced changeset.
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
365
365
366
366
def getlen(ui):
    """Return a length function; with perf.stub set, everything has length 1."""
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda x: 1
371
371
372
372
class noop:
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        # returning None lets any exception propagate
        return None


NOOPCTX = noop()
384
384
385
385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    # BUG FIX: the module docstring documents all-timing as "(default:
    # off)", so the unset fallback must be False, not True.
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<minimum-run-count>'; malformed entries
    # are warned about and skipped
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508
508
509
509
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (perf.stub mode); nothing is timed or reported."""
    if setup is not None:
        setup()
    func()
514
514
515
515
@contextlib.contextmanager
def timeone():
    """Time the with-block; the yielded list receives one
    (wall, user-cpu, system-cpu) tuple of elapsed seconds on exit."""
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times(): index 0 is user CPU time, index 1 is system CPU time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
526
526
527
527
# list of stop condition (elapsed time, minimal run count)
# _timer() checks these in order after every run: stop once 3s have
# elapsed with at least 100 runs done, or once 10s have elapsed with at
# least 3 runs done.
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
533
533
534
534
@contextlib.contextmanager
def noop_context():
    """Default per-run context manager: adds no behavior around the timed call."""
    yield
538
538
539
539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* until a stop condition in *limits* is met,
    then report the collected timings through formatter *fm*.

    *setup* runs (untimed) before every invocation; *context* wraps each
    invocation; *prerun* warm-up runs execute unmeasured; *profiler*, if
    given, profiles only the first measured run.  *limits* is a sequence
    of (elapsed-seconds, minimum-run-count) pairs checked in order.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs: executed but never timed or recorded
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # the last run's return value is reported alongside the timings
    formatone(fm, results, title=title, result=r, displayall=displayall)
582
582
583
583
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples and is sorted in
    place.  Only the best run is shown unless *displayall* is set, in
    which case max, average and median rows are emitted as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, entry):
        # fields of the best run carry no prefix; other roles are namespaced
        prefix = b'' if role == b'best' else b'%s.' % role
        wall, user, system = entry[0], entry[1], entry[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        averages = tuple(sum(column) / count for column in zip(*timings))
        emit(b'avg', averages)
        emit(b'median', timings[count // 2])
616
616
617
617
618 # utilities for historical portability
618 # utilities for historical portability
619
619
620
620
def getint(ui, section, name, default):
    """Read config option `section.name` as an int, or return `default`.

    Re-implements ui.configint for "historical portability":
    ui.configint has only been available since 1.9 (or fa2b596db182).

    Raises error.ConfigError when a value is present but not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
633
633
634
634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    # remember the current value so restore() can put it back
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
671
671
672
672
673 # utilities to examine each internal API changes
673 # utilities to examine each internal API changes
674
674
675
675
def getbranchmapsubsettable():
    """Locate the branch-map `subsettable` across Mercurial versions.

    For "historical portability", subsettable has lived in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for module in (branchmap, repoview, repoviewutil):
        table = getattr(module, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694
694
695
695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older versions expose the same vfs as repo.sopener
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    return getattr(repo, 'sopener')
705
705
706
706
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older versions expose the same vfs as repo.opener
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    return getattr(repo, 'opener')
716
716
717
717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746
746
747
747
748 # utilities to clear cache
748 # utilities to clear cache
749
749
750
750
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property `attrname` from `obj`.

    Operates on the unfiltered repository when `obj` supports it, so the
    cached value is really discarded and will be reloaded on next access.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    # also forget the filecache bookkeeping entry, if any
    obj._filecache.pop(attrname, None)
758
758
759
759
def clearchangelog(repo):
    """Force the changelog to be reloaded on next access."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # a filtered repo keeps its own changelog cache keys; reset them too
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
765
765
766
766
767 # perf commands
767 # perf commands
768
768
769
769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def run():
        walked = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walked))

    timer(run)
    fm.end()
783
783
784
784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file `f` at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
792
792
793
793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # force full consumption of the status result
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835
835
836
836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Read the attribute before entering the `try`: the original code did it
    # inside, so a failure there would have made the `finally` clause raise
    # NameError on `oldquiet` instead of the real error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # for "historical portability": scmutil.addremove grew a `uipathfn`
        # argument at some point; detect which signature we have
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854
854
855
855
def clearcaches(cl):
    """Clear a changelog's lookup caches, across internal API changes."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866
866
867
867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start every run from cold changelog caches
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
883
883
884
884
def _default_clear_on_disk_tags_cache(repo):
    """Remove the on-disk tags cache file (fallback implementation)."""
    from mercurial import tags

    cache_file = tags._filename(repo)
    repo.cachevfs.tryunlink(cache_file)
889
889
890
890
def _default_clear_on_disk_tags_fnodes_cache(repo):
    """Remove the on-disk tags fnodes cache file (fallback implementation)."""
    from mercurial import tags

    cache_file = tags._fnodescachefile
    repo.cachevfs.tryunlink(cache_file)
895
895
896
896
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    # a record is a 4-byte rev prefix plus a 20-byte node hash;
    # an all-0xff record marks the slot as missing
    missing_prefix = b'\xff' * 4
    missing_node = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for rev in revs:
        cache._writeentry(
            rev * tags._fnodesrecsize, missing_prefix, missing_node
        )
    cache.write()
908
908
909
909
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE),',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE),',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # prefer the in-tree helpers when this Mercurial provides them, and fall
    # back to the local "historical portability" implementations otherwise
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    clear_fnodes_fn = getattr(
        tags,
        "clear_cache_fnodes",
        _default_clear_on_disk_tags_fnodes_cache,
    )
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        # fix: the previous code called `clear_revs.extends(...)`, but list
        # has no `extends` method, so --clear-fnode-cache-rev always raised
        # AttributeError; `extend` is the correct method.
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        # warm a repo view that excludes the "new" revisions, so each run can
        # start from the tags cache state of the pre-update repository
        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
1029
1029
1030
1030
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        for _ancestor in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
1043
1043
1044
1044
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET against the heads' ancestors"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test only; the result is intentionally discarded
            rev in ancestors

    timer(run)
    fm.end()
1059
1059
1060
1060
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # positional arguments are either "REV" or "FILE REV"
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that the delta search would have received
    # when this revision was originally stored
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1124
1124
1125
1125
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # for "historical portability": resolve the path with whichever API this
    # Mercurial version provides, newest first
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def setup():
        # use a fresh peer for every run
        repos[1] = hg.peer(ui, opts, path)

    def run():
        setdiscovery.findcommonheads(ui, *repos)

    timer(run, setup=setup)
    fm.end()
1152
1152
1153
1153
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # make sure each run re-parses the bookmarks file
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1178
1178
1179
1179
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        # older Mercurial versions keep this helper in `exchange`
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed message: was "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # compute the outgoing set the bundle will carry
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    # only uncompressed bundles are supported for now (see docstring)
    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to /dev/null: only bundle generation time is of interest
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1283
1283
1284
1284
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(process):
        # time `process` against a freshly opened bundle
        def run():
            with open(bundlepath, b'rb') as src:
                payload = exchange.readbundle(ui, src, bundlepath)
                process(payload)

        return run

    def makereadnbytes(nbytes):
        # time draining the parsed bundle `nbytes` bytes at a time
        def run():
            with open(bundlepath, b'rb') as src:
                payload = exchange.readbundle(ui, src, bundlepath)
                while payload.read(nbytes):
                    pass

        return run

    def makestdioread(nbytes):
        # time raw file reads, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as src:
                while src.read(nbytes):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for _delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for _chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for _chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for _part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for _part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(nbytes):
        # time draining every bundle2 part `nbytes` bytes at a time
        def run():
            with open(bundlepath, b'rb') as src:
                payload = exchange.readbundle(ui, src, bundlepath)
                for part in payload.iterparts():
                    while part.read(nbytes):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once to detect the bundle flavor and pick matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for func, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(func, title=title)
        fm.end()
1409
1409
1410
1410
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        # exhaust the chunk generator; the generation is what we measure
        state, chunks = bundler._generatechangelog(cl, nodes)
        for _chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
1446
1446
1447
1447
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # Time `dirstate.hasdir` with the directory cache dropped between runs.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so its initial parsing is not part of the timing
    b'a' in dirstate

    def measure():
        dirstate.hasdir(b'a')
        try:
            # drop the cached directory map so the next run rebuilds it
            del dirstate._map._dirs
        except AttributeError:
            # some dirstate implementations expose no such cache attribute
            pass

    timer(measure)
    fm.end()
1464
1464
1465
1465
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point were a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate is loaded before anything is measured
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # measure a full iteration over an already-loaded dirstate
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # measure many membership tests, half hits and half misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: measure loading the dirstate from scratch

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1528
1528
1529
1529
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate itself; only the `_dirs` rebuild should be timed
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the directory cache so each run rebuilds it from scratch
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1548
1548
1549
1549
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate and the filefoldmap once before timing
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached property so each run rebuilds the fold map
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1569
1569
1570
1570
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate and the dirfoldmap once before timing
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the dirfoldmap and the `_dirs` cache it is built from
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1594
1594
1595
1595
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing starts
    b"a" in ds

    def setup():
        # force a rewrite even though nothing actually changed
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    # hold the wlock so writing the dirstate is legitimate
    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1613
1613
1614
1614
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1636
1636
1637
1637
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    # benchmark merge.calculateupdates between the selected revisions
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    local, other, base = _getmergerevs(repo, opts)

    def runcalc():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            local,
            other,
            [base],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(runcalc)
    fm.end()
1669
1669
1670
1670
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    local, other, base = _getmergerevs(repo, opts)

    def runcopies():
        copies.mergecopies(repo, local, other, base)

    timer(runcopies)
    fm.end()
1693
1693
1694
1694
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints once, outside the timed section
    srcctx = scmutil.revsingle(repo, rev1, rev1)
    dstctx = scmutil.revsingle(repo, rev2, rev2)

    def trace():
        copies.pathcopies(srcctx, dstctx)

    timer(trace)
    fm.end()
1708
1708
1709
1709
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def compute():
        cache = _phases
        if full:
            # with --full, also pay the cost of re-reading the phase file
            clearfilecache(repo, b'_phasecache')
            cache = repo._phasecache
        cache.invalidate()
        cache.loadphaserevs(repo)

    timer(compute)
    fm.end()
1734
1734
1735
1735
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer path API exposes `main_path`; fall back to the legacy attributes
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    pushop = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(pushop)

    remotesubset = pushop.fallbackheads

    with other.commandexecutor() as executor:
        remotephases = executor.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    if remotephases.get(b'publishing', False):
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # older index API: go through the nodemap instead
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for hexnode, phasevalue in remotephases.iteritems():
        if hexnode == b'publishing':  # ignore data related to publish option
            continue
        node = bin(hexnode)
        if has_node(node) and int(phasevalue):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def summarize():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(summarize)
    fm.end()
1798
1798
1799
1799
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset: resolve it and take its manifest node
        node = scmutil.revsingle(repo, rev, rev).manifestnode()
    elif len(rev) == 40:
        # a full 40-character hex string is a manifest node itself
        from mercurial.node import bin

        node = bin(rev)
    else:
        # otherwise REV must be an integer manifest revision
        try:
            rev = int(rev)

            if util.safehasattr(repo.manifestlog, b'getstorage'):
                node = repo.manifestlog.getstorage(b'').node(rev)
            else:
                # older Mercurial exposed the manifest revlog directly
                node = repo.manifestlog._revlog.lookup(rev)
        except ValueError:
            raise error.Abort(
                b'manifest revision must be integer or full node'
            )

    def run():
        # drop in-memory (and optionally on-disk) caches so each
        # iteration measures a cold manifest read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[node].read()

    timer(run)
    fm.end()
1843
1843
1844
1844
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset's full data from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)

    timer(run)
    fm.end()
1857
1857
1858
1858
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setup():
        # force the ignore matcher to be recomputed from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def run():
        # attribute access triggers the (cleared) filecache recomputation
        dirstate._ignore

    timer(run, setup=setup, title=b"load")
    fm.end()
1875
1875
1876
1876
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: _byteskwargs converted every key to bytes, so the option must
        # be read as opts[b'rev'] (the previous str key raised KeyError); the
        # Abort message is bytes for consistency with the rest of this file.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1939
1939
1940
1940
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # grab the filecache func directly so the benchmark does not measure
    # the filecache machinery itself
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # one-element list carrying the lookup callable from setup to run
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    if clearcaches:
        # rebuild a cold nodemap before every timed run
        setup = setnodeget
    else:
        setup = None
        setnodeget()
        d()  # prewarm the data structure

    timer(d, setup=setup)
    fm.end()
2011
2011
2012
2012
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the bare startup cost of running the `hg` executable"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        # re-run the current hg binary with an empty HGRCPATH, discarding
        # its output; the shell syntax differs between POSIX and Windows
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
2029
2029
2030
2030
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    ``version`` is one of ``b'v1'``, ``b'v2'``, ``b'v3-exp'`` or
    ``b'latest'``. The returned callable takes a repository and returns an
    iterable of stream-clone data chunks. Raises ``error.Abort`` when the
    requested version is unknown or not provided by this Mercurial.
    """
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate_v1(repo):
            # fix: the previous wrapper mistakenly called generatev2 and was
            # never registered — the raw generatev1 was stored instead, whose
            # return value is an (entries, bytes, data) tuple rather than the
            # chunk iterable callers expect.
            entries, bytes, data = generatev1(repo)
            return data

        available[b'v1'] = generate_v1
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate_v2(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate_v2
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate_v3(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate_v3

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
2078
2078
2079
2079
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # keep the produced generator alive past the timed section: deleting it
    # may trigger cleanup work we do not want to measure
    result_holder = [None]

    def setup_run():
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def run():
        # the lock is held for the duration the initialisation
        result_holder[0] = generate(repo)

    timer(run, setup=setup_run, title=b"load")
    fm.end()
2113
2113
2114
2114
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            # fix: help text had a typo ("to us") and omitted "v3", which
            # _find_stream_generator supports; now matches the help string of
            # the sibling perf::stream-locked-section command.
            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want
    # to measure

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()
2145
2145
2146
2146
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository
    """
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # [open bundle file, temporary directory] handed from the context
    # manager to each timed run
    run_variables = [None, None]

    @contextlib.contextmanager
    def context():
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle, tmp_dir = run_variables
        # only pass ui when no srcrepo
        localrepo.createrepository(
            repo.ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(repo.ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2224
2224
2225
2225
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    node_list = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in node_list:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
2251
2251
2252
2252
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changectx"""
    opts = _byteskwargs(opts)
    target_rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[target_rev].files())

    timer(run)
    fm.end()
2264
2264
2265
2265
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the files field of a raw changelog entry"""
    opts = _byteskwargs(opts)
    target_rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # index 3 of the parsed changelog entry holds the files list
        len(cl.read(target_rev)[3])

    timer(run)
    fm.end()
2278
2278
2279
2279
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
2286
2286
2287
2287
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark replaying a long series of random edits into a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every run replays the exact same pseudo-random edit script
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
2325
2325
2326
2326
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(scmutil.revrange(repo, specs))

    timer(run)
    fm.end()
2334
2334
2335
2335
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in a freshly constructed changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # the revlog constructor signature changed over time: try the modern
    # radix-based form first, then fall back to the older indexfile form
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def run():
        cl.rev(n)
        clearcaches(cl)

    timer(run)
    fm.end()
2356
2356
2357
2357
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running the `log` command"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the command's normal output while timing
    ui.pushbuffer()

    def run():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run)
    ui.popbuffer()
    fm.end()
2375
2375
2376
2376
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        last = len(repo) - 1
        for revnum in repo.changelog.revs(start=last, stop=-1):
            # reading the branch forces loading of the changelog entry,
            # exercising data access in addition to the index
            repo[revnum].branch()

    timer(moonwalk)
    fm.end()
2393
2393
2394
2394
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui that throws everything away so that the measurement
    # is dominated by templating work, not terminal I/O
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()

    revs = opts.get(b'rev') or [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    if testedtemplate is None:
        # default template: date, rev:short-node, author, first line of desc
        testedtemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
2437
2437
2438
2438
def _displaystats(ui, opts, entries, data):
    """display percentile statistics for collected measurement data

    ``entries`` is a list of ``(key, title)`` pairs and ``data`` maps each
    key to a list of tuples whose first element is the measured value.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # nothing was measured for this entry; skip it instead of
            # crashing on the percentile lookups below
            continue
        values.sort()
        # percentile indices must be derived from the number of collected
        # values, not from the number of keys in the ``data`` dict
        nbvalues = len(values)
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2483
2483
2484
2484
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column-title, %-format) pairs; timing/rename columns are filtered out
    # below when --timing was not requested
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for the --stats summary; timing keys are only
        # populated when --timing is also set
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # restrict to actual merge commits within the requested revisions
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        # each common-ancestor head is a candidate merge base
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2666
2666
2667
2667
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # select the table layout up-front: timing mode adds rename-count and
    # elapsed-time columns to each row
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for the --stats summary; timing keys only exist
        # when --timing is also set
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # restrict to actual merge commits within the requested revisions
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # measure each (base -> parent) direction independently
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no file to trace; the pair is uninteresting
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2806
2806
2807
2807
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build_auditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2814
2814
2815
2815
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2827
2827
2828
2828
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file

    Runs inside a repository lock and a transaction; the fncache is marked
    dirty before each timed write so every run performs real work.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # release the lock even if loading or the transaction setup fails,
    # otherwise an exception would leave the repository locked
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # force a dirty flag so write() does not short-circuit
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2847
2847
2848
2848
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once outside the timed section so only encoding is measured
    store.fncache._load()

    def encode_all():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encode_all)
    fm.end()
2862
2862
2863
2863
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for threaded bdiff benchmarking

    Pulls ``(text1, text2)`` pairs from queue ``q`` and diffs them with the
    algorithm selected by the ``xdiff``/``blocks`` flags.  A ``None`` item is
    the end-of-batch sentinel: the worker acknowledges it, then parks on the
    ``ready`` condition until the coordinator wakes it for the next batch.
    The loop exits once the ``done`` event is set.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        # wait for the coordinator to signal the start of the next batch
        with ready:
            ready.wait()
2879
2879
2880
2880
def _manifestrevision(repo, mnode):
    """return the raw manifest revision text for node ``mnode``

    Supports both the modern manifestlog API (``getstorage``) and older
    Mercurial versions that expose the revlog directly.
    """
    ml = repo.manifestlog
    has_getstorage = util.safehasattr(ml, b'getstorage')
    store = ml.getstorage(b'') if has_getstorage else ml._revlog
    return store.revision(mnode)
2890
2890
2891
2891
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    # --xdiff merely selects the algorithm used by --blocks, so it is a
    # usage error on its own.
    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the single positional argument is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # (old_text, new_text) tuples whose diffing is benchmarked below
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded run: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # multi-threaded run: hand pairs to worker threads over a queue;
        # one None sentinel per worker marks the end of a batch
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                # wake the workers blocked on the condition
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the worker threads down: signal completion, unblock each
        # worker with a sentinel, then wake anything still waiting
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
3006
3006
3007
3007
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # [bundle-reader, transaction] shared between setup() and apply()
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from the previous
                        # run before rewinding the bundle for a fresh pass
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # fix: this used `==` (a no-op comparison), which left the
                # ui permanently quiet after the benchmark
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3086 mercurial.revlog._maxinline = old_max_inline
3087
3087
3088
3088
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m, the positional argument names the revision instead
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # collected (left, right) text pairs to diff during the timed run
    pairs = []

    rlog = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    first = rlog.rev(rlog.lookup(rev))
    last = min(first + count, len(rlog) - 1)
    for cur in range(first, last):
        if not opts[b'alldata']:
            # plain mode: diff each revision against its delta parent
            base = rlog.deltaparent(cur)
            pairs.append((rlog.revision(base), rlog.revision(cur)))
            continue

        # changeset mode: gather the manifest text against each parent...
        ctx = repo[cur]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pairs.append((_manifestrevision(repo, pctx.manifestnode()), mtext))

        # ...and every touched file's old/new text, found via manifest diff
        man = ctx.manifest()
        pman = ctx.p1().manifest()
        for fname, change in pman.diff(man).items():
            flog = repo.file(fname)
            old = flog.revision(change[0][0] or -1)
            new = flog.revision(change[1][0] or -1)
            pairs.append((old, new))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3167
3167
3168
3168
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter diff flag -> corresponding diff option name
    flagmap = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # time `hg diff` under each interesting flag combination
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffopts = {flagmap[c]: b'1' for c in flags}

        def run(diffopts=diffopts):
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()

        rawflags = flags.encode('ascii')
        if rawflags:
            title = b'diffopts: %s' % (b'-' + rawflags)
        else:
            title = b'diffopts: %s' % b'none'
        timer(run, title=title)
    fm.end()
3192
3192
3193
3193
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # revlog v1 header: 16-bit version plus feature flag bits
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 5.8 exposed index parsing through revlogio instead
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes spread across the revlog for the lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # benchmark revlog instantiation alone
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        # benchmark raw index-file I/O
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # a fresh index per call so no lookup cache is reused across runs
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    # (callable, human-readable title) pairs, each timed independently
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3339
3339
3340
3340
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    total = getlen(ui)(rl)

    # a negative start counts back from the end of the revlog
    if startrev < 0:
        startrev = total + startrev

    def d():
        # drop any cached chunks so every pass pays the full read cost
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk from tip down to startrev, stepping backwards
            lo, hi, step = total - 1, startrev - 1, -1 * step
        else:
            lo, hi = startrev, total

        for pos in _xrange(lo, hi, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(pos))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3389
3389
3390
3390
3391 @command(
3391 @command(
3392 b'perf::revlogwrite|perfrevlogwrite',
3392 b'perf::revlogwrite|perfrevlogwrite',
3393 revlogopts
3393 revlogopts
3394 + formatteropts
3394 + formatteropts
3395 + [
3395 + [
3396 (b's', b'startrev', 1000, b'revision to start writing at'),
3396 (b's', b'startrev', 1000, b'revision to start writing at'),
3397 (b'', b'stoprev', -1, b'last revision to write'),
3397 (b'', b'stoprev', -1, b'last revision to write'),
3398 (b'', b'count', 3, b'number of passes to perform'),
3398 (b'', b'count', 3, b'number of passes to perform'),
3399 (b'', b'details', False, b'print timing for every revisions tested'),
3399 (b'', b'details', False, b'print timing for every revisions tested'),
3400 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3400 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3401 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3401 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3402 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3402 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3403 ],
3403 ],
3404 b'-c|-m|FILE',
3404 b'-c|-m|FILE',
3405 )
3405 )
3406 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3406 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3407 """Benchmark writing a series of revisions to a revlog.
3407 """Benchmark writing a series of revisions to a revlog.
3408
3408
3409 Possible source values are:
3409 Possible source values are:
3410 * `full`: add from a full text (default).
3410 * `full`: add from a full text (default).
3411 * `parent-1`: add from a delta to the first parent
3411 * `parent-1`: add from a delta to the first parent
3412 * `parent-2`: add from a delta to the second parent if it exists
3412 * `parent-2`: add from a delta to the second parent if it exists
3413 (use a delta from the first parent otherwise)
3413 (use a delta from the first parent otherwise)
3414 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3414 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3415 * `storage`: add from the existing precomputed deltas
3415 * `storage`: add from the existing precomputed deltas
3416
3416
3417 Note: This performance command measures performance in a custom way. As a
3417 Note: This performance command measures performance in a custom way. As a
3418 result some of the global configuration of the 'perf' command does not
3418 result some of the global configuration of the 'perf' command does not
3419 apply to it:
3419 apply to it:
3420
3420
3421 * ``pre-run``: disabled
3421 * ``pre-run``: disabled
3422
3422
3423 * ``profile-benchmark``: disabled
3423 * ``profile-benchmark``: disabled
3424
3424
3425 * ``run-limits``: disabled use --count instead
3425 * ``run-limits``: disabled use --count instead
3426 """
3426 """
3427 opts = _byteskwargs(opts)
3427 opts = _byteskwargs(opts)
3428
3428
3429 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3429 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3430 rllen = getlen(ui)(rl)
3430 rllen = getlen(ui)(rl)
3431 if startrev < 0:
3431 if startrev < 0:
3432 startrev = rllen + startrev
3432 startrev = rllen + startrev
3433 if stoprev < 0:
3433 if stoprev < 0:
3434 stoprev = rllen + stoprev
3434 stoprev = rllen + stoprev
3435
3435
3436 lazydeltabase = opts['lazydeltabase']
3436 lazydeltabase = opts['lazydeltabase']
3437 source = opts['source']
3437 source = opts['source']
3438 clearcaches = opts['clear_caches']
3438 clearcaches = opts['clear_caches']
3439 validsource = (
3439 validsource = (
3440 b'full',
3440 b'full',
3441 b'parent-1',
3441 b'parent-1',
3442 b'parent-2',
3442 b'parent-2',
3443 b'parent-smallest',
3443 b'parent-smallest',
3444 b'storage',
3444 b'storage',
3445 )
3445 )
3446 if source not in validsource:
3446 if source not in validsource:
3447 raise error.Abort('invalid source type: %s' % source)
3447 raise error.Abort('invalid source type: %s' % source)
3448
3448
3449 ### actually gather results
3449 ### actually gather results
3450 count = opts['count']
3450 count = opts['count']
3451 if count <= 0:
3451 if count <= 0:
3452 raise error.Abort('invalide run count: %d' % count)
3452 raise error.Abort('invalide run count: %d' % count)
3453 allresults = []
3453 allresults = []
3454 for c in range(count):
3454 for c in range(count):
3455 timing = _timeonewrite(
3455 timing = _timeonewrite(
3456 ui,
3456 ui,
3457 rl,
3457 rl,
3458 source,
3458 source,
3459 startrev,
3459 startrev,
3460 stoprev,
3460 stoprev,
3461 c + 1,
3461 c + 1,
3462 lazydeltabase=lazydeltabase,
3462 lazydeltabase=lazydeltabase,
3463 clearcaches=clearcaches,
3463 clearcaches=clearcaches,
3464 )
3464 )
3465 allresults.append(timing)
3465 allresults.append(timing)
3466
3466
3467 ### consolidate the results in a single list
3467 ### consolidate the results in a single list
3468 results = []
3468 results = []
3469 for idx, (rev, t) in enumerate(allresults[0]):
3469 for idx, (rev, t) in enumerate(allresults[0]):
3470 ts = [t]
3470 ts = [t]
3471 for other in allresults[1:]:
3471 for other in allresults[1:]:
3472 orev, ot = other[idx]
3472 orev, ot = other[idx]
3473 assert orev == rev
3473 assert orev == rev
3474 ts.append(ot)
3474 ts.append(ot)
3475 results.append((rev, ts))
3475 results.append((rev, ts))
3476 resultcount = len(results)
3476 resultcount = len(results)
3477
3477
3478 ### Compute and display relevant statistics
3478 ### Compute and display relevant statistics
3479
3479
3480 # get a formatter
3480 # get a formatter
3481 fm = ui.formatter(b'perf', opts)
3481 fm = ui.formatter(b'perf', opts)
3482 displayall = ui.configbool(b"perf", b"all-timing", True)
3482 displayall = ui.configbool(b"perf", b"all-timing", True)
3483
3483
3484 # print individual details if requested
3484 # print individual details if requested
3485 if opts['details']:
3485 if opts['details']:
3486 for idx, item in enumerate(results, 1):
3486 for idx, item in enumerate(results, 1):
3487 rev, data = item
3487 rev, data = item
3488 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3488 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3489 formatone(fm, data, title=title, displayall=displayall)
3489 formatone(fm, data, title=title, displayall=displayall)
3490
3490
3491 # sorts results by median time
3491 # sorts results by median time
3492 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3492 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3493 # list of (name, index) to display)
3493 # list of (name, index) to display)
3494 relevants = [
3494 relevants = [
3495 ("min", 0),
3495 ("min", 0),
3496 ("10%", resultcount * 10 // 100),
3496 ("10%", resultcount * 10 // 100),
3497 ("25%", resultcount * 25 // 100),
3497 ("25%", resultcount * 25 // 100),
3498 ("50%", resultcount * 70 // 100),
3498 ("50%", resultcount * 70 // 100),
3499 ("75%", resultcount * 75 // 100),
3499 ("75%", resultcount * 75 // 100),
3500 ("90%", resultcount * 90 // 100),
3500 ("90%", resultcount * 90 // 100),
3501 ("95%", resultcount * 95 // 100),
3501 ("95%", resultcount * 95 // 100),
3502 ("99%", resultcount * 99 // 100),
3502 ("99%", resultcount * 99 // 100),
3503 ("99.9%", resultcount * 999 // 1000),
3503 ("99.9%", resultcount * 999 // 1000),
3504 ("99.99%", resultcount * 9999 // 10000),
3504 ("99.99%", resultcount * 9999 // 10000),
3505 ("99.999%", resultcount * 99999 // 100000),
3505 ("99.999%", resultcount * 99999 // 100000),
3506 ("max", -1),
3506 ("max", -1),
3507 ]
3507 ]
3508 if not ui.quiet:
3508 if not ui.quiet:
3509 for name, idx in relevants:
3509 for name, idx in relevants:
3510 data = results[idx]
3510 data = results[idx]
3511 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3511 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3512 formatone(fm, data[1], title=title, displayall=displayall)
3512 formatone(fm, data[1], title=title, displayall=displayall)
3513
3513
3514 # XXX summing that many float will not be very precise, we ignore this fact
3514 # XXX summing that many float will not be very precise, we ignore this fact
3515 # for now
3515 # for now
3516 totaltime = []
3516 totaltime = []
3517 for item in allresults:
3517 for item in allresults:
3518 totaltime.append(
3518 totaltime.append(
3519 (
3519 (
3520 sum(x[1][0] for x in item),
3520 sum(x[1][0] for x in item),
3521 sum(x[1][1] for x in item),
3521 sum(x[1][1] for x in item),
3522 sum(x[1][2] for x in item),
3522 sum(x[1][2] for x in item),
3523 )
3523 )
3524 )
3524 )
3525 formatone(
3525 formatone(
3526 fm,
3526 fm,
3527 totaltime,
3527 totaltime,
3528 title="total time (%d revs)" % resultcount,
3528 title="total time (%d revs)" % resultcount,
3529 displayall=displayall,
3529 displayall=displayall,
3530 )
3530 )
3531 fm.end()
3531 fm.end()
3532
3532
3533
3533
3534 class _faketr:
3534 class _faketr:
3535 def add(s, x, y, z=None):
3535 def add(s, x, y, z=None):
3536 return None
3536 return None
3537
3537
3538
3538
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions of ``orig`` into a temporary revlog, timing each
    ``addrawrevision`` call individually.

    Returns a list of ``(rev, timing)`` pairs, where ``timing`` is the
    measurement captured by the ``timeone`` context manager for that
    single write.

    ``source`` selects how each revision's payload is produced (fulltext
    or one of the delta flavors) -- see ``_getrevisionseed``.
    ``runidx``, when not None, is only used to label the progress topic.
    """
    timings = []
    # addrawrevision expects a transaction object; a no-op stand-in is
    # enough since nothing needs to be journaled for a benchmark.
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        # modern revlogs expose the laziness knob on delta_config; older
        # ones use a private attribute
        if hasattr(dest, "delta_config"):
            dest.delta_config.lazy_delta_base = lazydeltabase
        else:
            dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # Build the payload before entering the timed section so that
            # only the write itself is measured.
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3591
3591
3592
3592
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair fed to ``addrawrevision`` for ``rev``.

    Depending on ``source``, the revision payload is provided either as a
    fulltext or as a cached delta against a chosen base revision.  The
    caller is expected to have validated ``source`` already.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    linkrev = orig.linkrev(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)

    # At most one of these ends up non-None, depending on ``source``.
    fulltext = None
    delta = None

    if source == b'full':
        fulltext = orig.revision(rev)
    elif source == b'parent-1':
        delta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        delta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the smaller diff
        base = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(diff) > len(otherdiff):
                base = p2
                diff = otherdiff
        delta = (orig.rev(base), diff)
    elif source == b'storage':
        base = orig.deltaparent(rev)
        delta = (base, orig.revdiff(orig.node(base), rev))

    return (
        (fulltext, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': delta},
    )
3633
3633
3634
3634
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of ``orig`` truncated so
    that revisions from ``truncaterev`` onward can be re-added.

    The index and data files are copied into a temporary directory, cut
    at the offsets corresponding to ``truncaterev``, and a fresh revlog
    is instantiated on top of the copies.  The temporary directory is
    always removed on exit.  Inline revlogs are rejected: index and data
    share a single file there, so the two-file truncation below would
    not apply.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # forward upperboundcomp only when the installed revlog supports it
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # modern private attribute first, pre-6.0 public name as fallback
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # cut the index at ``truncaterev`` entries of ``_io.size``
            # bytes each
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern, radix-based constructor signature
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older signature took explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3695
3695
3696
3696
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # - _chunkraw was renamed to _getsegmentforrevs
    # - _getsegmentforrevs was moved on the inner object
    try:
        segmentforrevs = rl._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = rl._getsegmentforrevs
        except AttributeError:
            segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    @contextlib.contextmanager
    def reading(rl):
        # Yield an open file object on legacy revlogs (to be passed as the
        # ``df`` argument), or None when the modern ``reading`` context
        # manager handles file access itself.
        if getattr(rl, 'reading', None) is not None:
            with rl.reading():
                yield None
        elif rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            yield getsvfs(repo)(indexfile)
        else:
            # BUGFIX: this used to look up 'datafile' twice
            # (getattr(rl, 'datafile', getattr(rl, 'datafile'))), which
            # raises AttributeError on revlogs that only expose the modern
            # '_datafile' attribute.  Mirror the working fallback used in
            # _temprevlog: modern private name first, pre-6.0 public name
            # as the default.
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            yield getsvfs(repo)(datafile)

    if getattr(rl, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(rl):
            with rl.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(rl):
            yield

    def doread():
        rl.clearcaches()
        for rev in revs:
            with lazy_reading(rl):
                segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    segmentforrevs(rev, rev, df=fh)
            else:
                for rev in revs:
                    segmentforrevs(rev, rev)

    def doreadbatch():
        rl.clearcaches()
        with lazy_reading(rl):
            segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                segmentforrevs(revs[0], revs[-1], df=fh)
            else:
                segmentforrevs(revs[0], revs[-1])

    def dochunk():
        rl.clearcaches()
        # chunk used to be available directly on the revlog
        _chunk = getattr(rl, '_inner', rl)._chunk
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    _chunk(rev, df=fh)
            else:
                for rev in revs:
                    _chunk(rev)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        _chunks = getattr(rl, '_inner', rl)._chunks
        with reading(rl) as fh:
            if fh is not None:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs, df=fh)
            else:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs)

    def docompress(compressor):
        rl.clearcaches()

        compressor_holder = getattr(rl, '_inner', rl)

        try:
            # Swap in the requested compression engine.
            oldcompressor = compressor_holder._compressor
            compressor_holder._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            compressor_holder._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # the compression benches reuse the chunks saved by dochunkbatch above
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3872
3872
3873
3873
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m, the positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = r._getsegmentforrevs
        except AttributeError:
            segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    if getattr(r, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(r):
            with r.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(r):
            yield

    def getrawchunks(data, chain):
        # Slice each already-read segment back into one raw chunk per
        # revision, without copying (util.buffer).
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # compatibility with older revlog index implementations
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            with lazy_reading(r):
                segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]

    # sparse-read knob moved to data_config on modern revlogs
    with_sparse_read = False
    if hasattr(r, 'data_config'):
        with_sparse_read = r.data_config.with_sparse_read
    elif hasattr(r, '_withsparseread'):
        with_sparse_read = r._withsparseread
    # NOTE(review): when sparse-read is enabled the chain is kept
    # unsliced here, yet the 'slice-sparse-chain' benchmark below is only
    # added in that same case -- this condition looks inverted relative
    # to slicechunk's purpose; confirm against revlogutils.deltas.
    if with_sparse_read:
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    # NOTE(review): unlike the segmentforrevs shim above, this accesses
    # ._inner without a fallback, so it presumably requires a modern
    # revlog -- confirm intended minimum hg version.
    bins = r._inner._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if with_sparse_read:
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
4041
4041
4042
4042
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revisions set cache on the revset execution. Volatile
    cache holds filtered and obsolete related caches."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop the volatile sets so each run recomputes them
            repo.invalidatevolatilesets()
        if contexts:
            # also pay the cost of building a changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            # plain revision-number iteration
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
4074
4074
4075
4075
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    drop_obsstore = opts[b'clear_obsstore']

    def _make_bench(compute, argument):
        # Build a benchmark closure that recomputes ``compute(repo, argument)``
        # from a cold volatile-set cache (and, on request, a cold obsstore).
        def run():
            repo.invalidatevolatilesets()
            if drop_obsstore:
                clearfilecache(repo, b'obsstore')
            compute(repo, argument)

        return run

    # obsolescence related sets first
    obs_names = sorted(obsolete.cachefuncs)
    if names:
        obs_names = [n for n in obs_names if n in names]
    for name in obs_names:
        timer(_make_bench(obsolete.getrevs, name), title=name)

    # then the filtered revision sets
    filter_names = sorted(repoview.filtertable)
    if names:
        filter_names = [n for n in filter_names if n in names]
    for name in filter_names:
        timer(_make_bench(repoview.filterrevs, name), title=name)
    fm.end()
4123
4123
4124
4124
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            subject = repo
        else:
            subject = repo.filtered(filtername)
        if util.safehasattr(subject._branchcaches, '_per_filter'):
            cache_by_filter = subject._branchcaches._per_filter
        else:
            # compatibility with older Mercurial versions
            cache_by_filter = subject._branchcaches

        def run():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                subject._branchcaches.clear()
            else:
                cache_by_filter.pop(filtername, None)
            subject.branchmap()

        return run

    # order filters from smaller subset to bigger subset
    remaining = set(repoview.filtertable)
    if filternames:
        remaining &= set(filternames)
    subsettable = getbranchmapsubsettable()
    ordered = []
    while remaining:
        for name in remaining:
            if subsettable.get(name) not in remaining:
                break
        else:
            assert False, b'subset cycle %s!' % remaining
        ordered.append(name)
        remaining.remove(name)

    # warm the cache
    if not full:
        for name in ordered:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # ``None`` stands for the unfiltered view
        ordered.append(None)

    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # compatibility with older Mercurial versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in ordered:
            title = b'unfiltered' if name is None else name
            timer(getbranchmap(name), title=title)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
4214
4214
4215
4215
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # revisions the benchmark will actually add on top of the base
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # install temporary repoview filters matching the base/target subsets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4324
4324
4325
4325
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # only enumerate the on-disk branchmap cache files, no benchmark
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
4384
4384
4385
4385
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def count_markers():
        # obsstore parsing happens when its length is requested
        return len(obsolete.obsstore(repo, svfs))

    timer(count_markers)
    fm.end()
4395
4395
4396
4396
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark util.lrucachedict operations (init/get/set/mixed)"""
    opts = _byteskwargs(opts)

    def bench_init():
        # raw construction cost of the cache object
        for _ in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # Pre-generate all random sequences so the timed closures are
    # deterministic and only measure the cache operations themselves.
    values = [random.randint(0, _maxint) for _ in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = [random.choice(values) for _ in _xrange(gets)]

    def bench_gets():
        cache = util.lrucachedict(size)
        for v in values:
            cache[v] = v
        for key in getseq:
            value = cache[key]
            value  # silence pyflakes warning

    def bench_gets_cost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            cache.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = cache[key]
                value  # silence pyflakes warning
            except KeyError:
                # entries may have been evicted by the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for _ in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def bench_inserts():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache.insert(v, v)

    def bench_inserts_cost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            cache.insert(v, v, cost=costs[i])

    def bench_sets():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for _ in _xrange(mixed):
        r = random.randint(0, 100)
        op = 0 if r < mixedgetfreq else 1
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def bench_mixed():
        cache = util.lrucachedict(size)
        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache[v] = v

    def bench_mixed_cost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache.insert(v, v, cost=cost)

    benches = [
        (bench_init, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (bench_gets_cost, b'gets w/ cost limit'),
                (bench_inserts_cost, b'inserts w/ cost limit'),
                (bench_mixed_cost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (bench_gets, b'gets'),
                (bench_inserts, b'inserts'),
                (bench_sets, b'sets'),
                (bench_mixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4551
4551
4552
4552
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # resolve the ui output method under test (write, status, ...)
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # pre-build the whole line so only the write call is measured
        line = item * nitems + b'\n'

    def benchmark():
        for _line_idx in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for _item_idx in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4594
4594
4595
4595
def uisetup(ui):
    has_openrevlog = util.safehasattr(cmdutil, b'openrevlog')
    has_debugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if has_openrevlog and not has_debugrevlogopts:
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def _checked_openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(
            cmdutil, _sysstr(b'openrevlog'), _checked_openrevlog
        )
4615
4615
4616
4616
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    def doprogress():
        # Walk the progress bar through its whole range once per timed run.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer, fm = gettimer(ui, opts)
    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now