##// END OF EJS Templates
perf-unbundle: do a quick and dirty fix to make it run on more commit...
marmoute -
r50457:27bff608 stable
parent child Browse files
Show More
@@ -1,4205 +1,4230
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged (fallback when a pycompat helper is missing)."""
    return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr*.

    *attr* may be a bytes name (historical callers), hence the _sysstr
    conversion; a sentinel default avoids raising from getattr.
    """
    missing = _undefined
    return getattr(thing, _sysstr(attr), missing) is not missing
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238 # for "historical portability":
238 # for "historical portability":
239 # define parsealiases locally, because cmdutil.parsealiases has been
239 # define parsealiases locally, because cmdutil.parsealiases has been
240 # available since 1.5 (or 6252852b4332)
240 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command declaration like b"name|alias1|alias2" into a list.

    Local re-implementation of cmdutil.parsealiases for historical
    portability (only available since 1.5).
    """
    aliases = cmd.split(b"|")
    return aliases
243
243
244
244
# Pick the most modern way to declare commands that this Mercurial offers,
# falling back step by step for "historical portability".
if safehasattr(registrar, 'command'):
    # modern path: registrar.command (3.7+)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    # cmdutil.command exists since 1.9 (or 2daa5179e73f)
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # wrap original cmdutil.command, because the "norepo" option has
        # only been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)

else:
    # oldest fallback: hand-rolled "@command" decorator filling cmdtable
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
276
276
277
277
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    _dynamicdefault = mercurial.configitems.dynamicdefault
    configitem(b'perf', b'presleep', default=_dynamicdefault, experimental=True)
    configitem(b'perf', b'stub', default=_dynamicdefault, experimental=True)
    configitem(
        b'perf', b'parentscount', default=_dynamicdefault, experimental=True
    )
    configitem(
        b'perf', b'all-timing', default=_dynamicdefault, experimental=True
    )
    configitem(b'perf', b'pre-run', default=_dynamicdefault)
    configitem(b'perf', b'profile-benchmark', default=_dynamicdefault)
    configitem(
        b'perf', b'run-limits', default=_dynamicdefault, experimental=True
    )
except (ImportError, AttributeError):
    # registrar/configitems not available on early Mercurial; declaring
    # config items is then simply skipped.
    pass
except TypeError:
    # compatibility fix for a11fd395e83f (hg 5.2): retry every declaration
    # without the `experimental` keyword, which older registrars reject.
    configitem(b'perf', b'presleep', default=_dynamicdefault)
    configitem(b'perf', b'stub', default=_dynamicdefault)
    configitem(b'perf', b'parentscount', default=_dynamicdefault)
    configitem(b'perf', b'all-timing', default=_dynamicdefault)
    configitem(b'perf', b'pre-run', default=_dynamicdefault)
    configitem(b'perf', b'profile-benchmark', default=_dynamicdefault)
    configitem(b'perf', b'run-limits', default=_dynamicdefault)
364
364
365
365
def getlen(ui):
    """Return a length function honoring stub mode.

    In perf.stub mode every collection is reported as size 1 so benchmarks
    stay cheap; otherwise the builtin ``len`` is returned.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
370
370
371
371
class noop:
    """Dummy context manager, used where a profiler context is optional."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None


# Shared do-nothing context; stands in for the profiler when disabled.
NOOPCTX = noop()
383
383
384
384
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        fout_setter = safeattrsetter(ui, b'fout', ignoremissing=True)
        if fout_setter:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            fout_setter.set(ui.ferr)

    # get a formatter
    formatter_factory = getattr(ui, 'formatter', None)
    if formatter_factory:
        fm = formatter_factory(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    def parse_limit_entry(entry):
        """Parse one `<time>-<numberofrun>` spec; return a tuple or None."""
        pieces = entry.split(b'-', 1)
        if len(pieces) < 2:
            ui.warn(
                (b'malformatted run limit entry, missing "-": %s\n' % entry)
            )
            return None
        try:
            time_limit = float(_sysstr(pieces[0]))
            run_limit = int(_sysstr(pieces[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), entry)
                )
            )
            return None
        return (time_limit, run_limit)

    limits = []
    for item in ui.configlist(b"perf", b"run-limits", []):
        parsed = parse_limit_entry(item)
        if parsed is not None:
            limits.append(parsed)
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
507
507
508
508
def stub_timer(fm, func, setup=None, title=None):
    """Degenerate timer for perf.stub mode: one setup, one run, no timing."""
    if setup is not None:
        setup()
    func()
513
513
514
514
@contextlib.contextmanager
def timeone():
    """Yield a list; after the block exits, it holds one timing tuple.

    The appended tuple is (wallclock, user-cpu, system-cpu) deltas for the
    managed block.
    """
    record = []
    os_before = os.times()
    wall_before = util.timer()
    yield record
    wall_after = util.timer()
    os_after = os.times()
    record.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
525
525
526
526
527 # list of stop condition (elapsed time, minimal run count)
527 # list of stop condition (elapsed time, minimal run count)
528 DEFAULTLIMITS = (
528 DEFAULTLIMITS = (
529 (3.0, 100),
529 (3.0, 100),
530 (10.0, 3),
530 (10.0, 3),
531 )
531 )
532
532
533
533
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark ``func`` and report through formatter ``fm``.

    Runs ``prerun`` unmeasured warmups, then measures repeatedly until one
    of the (elapsed-time, min-run-count) ``limits`` is satisfied.  Only the
    first measured iteration runs under ``profiler``.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # unmeasured warmup runs
    for _ in range(prerun):
        if setup is not None:
            setup()
        func()
    while True:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as sample:
                last_result = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(sample[0])
        elapsed = util.timer() - begin
        # stop once any (time, min-run) limit is satisfied, in order
        if any(elapsed >= t and count >= mincount for t, mincount in limits):
            break

    formatone(
        fm, results, title=title, result=last_result, displayall=displayall
    )
573
573
574
574
575 def formatone(fm, timings, title=None, result=None, displayall=False):
575 def formatone(fm, timings, title=None, result=None, displayall=False):
576
576
577 count = len(timings)
577 count = len(timings)
578
578
579 fm.startitem()
579 fm.startitem()
580
580
581 if title:
581 if title:
582 fm.write(b'title', b'! %s\n', title)
582 fm.write(b'title', b'! %s\n', title)
583 if result:
583 if result:
584 fm.write(b'result', b'! result: %s\n', result)
584 fm.write(b'result', b'! result: %s\n', result)
585
585
586 def display(role, entry):
586 def display(role, entry):
587 prefix = b''
587 prefix = b''
588 if role != b'best':
588 if role != b'best':
589 prefix = b'%s.' % role
589 prefix = b'%s.' % role
590 fm.plain(b'!')
590 fm.plain(b'!')
591 fm.write(prefix + b'wall', b' wall %f', entry[0])
591 fm.write(prefix + b'wall', b' wall %f', entry[0])
592 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
592 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
593 fm.write(prefix + b'user', b' user %f', entry[1])
593 fm.write(prefix + b'user', b' user %f', entry[1])
594 fm.write(prefix + b'sys', b' sys %f', entry[2])
594 fm.write(prefix + b'sys', b' sys %f', entry[2])
595 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
595 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
596 fm.plain(b'\n')
596 fm.plain(b'\n')
597
597
598 timings.sort()
598 timings.sort()
599 min_val = timings[0]
599 min_val = timings[0]
600 display(b'best', min_val)
600 display(b'best', min_val)
601 if displayall:
601 if displayall:
602 max_val = timings[-1]
602 max_val = timings[-1]
603 display(b'max', max_val)
603 display(b'max', max_val)
604 avg = tuple([sum(x) / count for x in zip(*timings)])
604 avg = tuple([sum(x) / count for x in zip(*timings)])
605 display(b'avg', avg)
605 display(b'avg', avg)
606 median = timings[len(timings) // 2]
606 median = timings[len(timings) // 2]
607 display(b'median', median)
607 display(b'median', median)
608
608
609
609
610 # utilities for historical portability
610 # utilities for historical portability
611
611
612
612
def getint(ui, section, name, default):
    """Read config option ``section.name`` as an int, or `default` if unset.

    For "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so the parsing is done by hand here.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
625
625
626
626
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        # small handle exposing set()/restore() for the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663
663
664
664
665 # utilities to examine each internal API changes
665 # utilities to examine each internal API changes
666
666
667
667
def getbranchmapsubsettable():
    """Return the 'subsettable' mapping from whichever module defines it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686
686
687
687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    store_vfs = getattr(repo, 'svfs', None)
    if not store_vfs:
        # pre-2.3 repositories expose 'sopener' instead
        return getattr(repo, 'sopener')
    return store_vfs
697
697
698
698
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    working_vfs = getattr(repo, 'vfs', None)
    if not working_vfs:
        # pre-2.3 repositories expose 'opener' instead
        return getattr(repo, 'opener')
    return working_vfs
708
708
709
709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738
738
739
739
740 # utilities to clear cache
740 # utilities to clear cache
741
741
742
742
def clearfilecache(obj, attrname):
    """Drop the cached attribute `attrname` from `obj`.

    Operates on the unfiltered repository when `obj` has one, removes the
    materialized attribute, and discards its entry in `obj._filecache`.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750
750
751
751
def clearchangelog(repo):
    """Drop any cached changelog so the next access re-reads it."""
    if repo is not repo.unfiltered():
        # use object.__setattr__ to bypass any attribute handling on the
        # filtered repo view when resetting its cached changelog and key
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
757
757
758
758
759 # perf commands
759 # perf commands
760
760
761
761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate with a matcher built from PATS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # materialize the walk into a list so the full traversal is timed
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
775
775
776
776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
784
784
785
785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # time the low-level dirstate.status() call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so its cost is included in the timing
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
822
822
823
823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture before entering the try block: if this were inside it and
    # raised, the finally clause would fail with a NameError
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # the 'uipathfn' argument was added to scmutil.addremove at some
        # point; pick the matching calling convention
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841
841
842
842
def clearcaches(cl):
    """Clear the lookup caches of changelog/revlog `cl`."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853
853
854
854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # setup: drop caches so every run recomputes from scratch
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
870
870
871
871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark computing the full tag list of the repository"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # setup: optionally reload changelog/manifest, and always drop
        # the tags cache so each run recomputes it
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
896
896
897
897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over the ancestors of all current heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the iterator; the yielded values are not used
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
910
910
911
911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revisions in the heads' ancestors"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # the membership test is the operation being benchmarked
            rev in s

    timer(d)
    fm.end()
926
926
927
927
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # one argument: just a revision (revlog chosen via -c/-m);
    # two arguments: a file path and a revision
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # gather everything a real revision addition would have passed in
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
991
991
992
992
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # NOTE(review): unlike most perf commands, opts is not run through
    # _byteskwargs here — confirm gettimer/hg.peer accept native-str keys
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        # modern API (urlutil); fall back to ui.expandpath on older hg
        from mercurial.utils.urlutil import get_unique_pull_path

        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        path = ui.expandpath(path)

    def s():
        # setup: open a fresh peer before each timed run
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1014
1014
1015
1015
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # setup: drop caches so every run re-reads bookmarks from disk
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        # touching the cached property triggers the (re)parse
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
1040
1040
1041
1041
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # for "historical portability": parsebundlespec moved from exchange
    # to bundlecaches
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fix ungrammatical error message ("not revision specified")
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # heads of the selected set, and heads of its ancestors outside the set,
    # define the outgoing object the bundling code expects
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # derive the changegroup version from the bundlespec when not explicit
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: only the bundle generation cost is measured
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1145
1145
1146
1146
1147 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1147 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1148 def perfbundleread(ui, repo, bundlepath, **opts):
1148 def perfbundleread(ui, repo, bundlepath, **opts):
1149 """Benchmark reading of bundle files.
1149 """Benchmark reading of bundle files.
1150
1150
1151 This command is meant to isolate the I/O part of bundle reading as
1151 This command is meant to isolate the I/O part of bundle reading as
1152 much as possible.
1152 much as possible.
1153 """
1153 """
1154 from mercurial import (
1154 from mercurial import (
1155 bundle2,
1155 bundle2,
1156 exchange,
1156 exchange,
1157 streamclone,
1157 streamclone,
1158 )
1158 )
1159
1159
1160 opts = _byteskwargs(opts)
1160 opts = _byteskwargs(opts)
1161
1161
1162 def makebench(fn):
1162 def makebench(fn):
1163 def run():
1163 def run():
1164 with open(bundlepath, b'rb') as fh:
1164 with open(bundlepath, b'rb') as fh:
1165 bundle = exchange.readbundle(ui, fh, bundlepath)
1165 bundle = exchange.readbundle(ui, fh, bundlepath)
1166 fn(bundle)
1166 fn(bundle)
1167
1167
1168 return run
1168 return run
1169
1169
1170 def makereadnbytes(size):
1170 def makereadnbytes(size):
1171 def run():
1171 def run():
1172 with open(bundlepath, b'rb') as fh:
1172 with open(bundlepath, b'rb') as fh:
1173 bundle = exchange.readbundle(ui, fh, bundlepath)
1173 bundle = exchange.readbundle(ui, fh, bundlepath)
1174 while bundle.read(size):
1174 while bundle.read(size):
1175 pass
1175 pass
1176
1176
1177 return run
1177 return run
1178
1178
1179 def makestdioread(size):
1179 def makestdioread(size):
1180 def run():
1180 def run():
1181 with open(bundlepath, b'rb') as fh:
1181 with open(bundlepath, b'rb') as fh:
1182 while fh.read(size):
1182 while fh.read(size):
1183 pass
1183 pass
1184
1184
1185 return run
1185 return run
1186
1186
1187 # bundle1
1187 # bundle1
1188
1188
1189 def deltaiter(bundle):
1189 def deltaiter(bundle):
1190 for delta in bundle.deltaiter():
1190 for delta in bundle.deltaiter():
1191 pass
1191 pass
1192
1192
1193 def iterchunks(bundle):
1193 def iterchunks(bundle):
1194 for chunk in bundle.getchunks():
1194 for chunk in bundle.getchunks():
1195 pass
1195 pass
1196
1196
1197 # bundle2
1197 # bundle2
1198
1198
1199 def forwardchunks(bundle):
1199 def forwardchunks(bundle):
1200 for chunk in bundle._forwardchunks():
1200 for chunk in bundle._forwardchunks():
1201 pass
1201 pass
1202
1202
1203 def iterparts(bundle):
1203 def iterparts(bundle):
1204 for part in bundle.iterparts():
1204 for part in bundle.iterparts():
1205 pass
1205 pass
1206
1206
1207 def iterpartsseekable(bundle):
1207 def iterpartsseekable(bundle):
1208 for part in bundle.iterparts(seekable=True):
1208 for part in bundle.iterparts(seekable=True):
1209 pass
1209 pass
1210
1210
1211 def seek(bundle):
1211 def seek(bundle):
1212 for part in bundle.iterparts(seekable=True):
1212 for part in bundle.iterparts(seekable=True):
1213 part.seek(0, os.SEEK_END)
1213 part.seek(0, os.SEEK_END)
1214
1214
1215 def makepartreadnbytes(size):
1215 def makepartreadnbytes(size):
1216 def run():
1216 def run():
1217 with open(bundlepath, b'rb') as fh:
1217 with open(bundlepath, b'rb') as fh:
1218 bundle = exchange.readbundle(ui, fh, bundlepath)
1218 bundle = exchange.readbundle(ui, fh, bundlepath)
1219 for part in bundle.iterparts():
1219 for part in bundle.iterparts():
1220 while part.read(size):
1220 while part.read(size):
1221 pass
1221 pass
1222
1222
1223 return run
1223 return run
1224
1224
1225 benches = [
1225 benches = [
1226 (makestdioread(8192), b'read(8k)'),
1226 (makestdioread(8192), b'read(8k)'),
1227 (makestdioread(16384), b'read(16k)'),
1227 (makestdioread(16384), b'read(16k)'),
1228 (makestdioread(32768), b'read(32k)'),
1228 (makestdioread(32768), b'read(32k)'),
1229 (makestdioread(131072), b'read(128k)'),
1229 (makestdioread(131072), b'read(128k)'),
1230 ]
1230 ]
1231
1231
1232 with open(bundlepath, b'rb') as fh:
1232 with open(bundlepath, b'rb') as fh:
1233 bundle = exchange.readbundle(ui, fh, bundlepath)
1233 bundle = exchange.readbundle(ui, fh, bundlepath)
1234
1234
1235 if isinstance(bundle, changegroup.cg1unpacker):
1235 if isinstance(bundle, changegroup.cg1unpacker):
1236 benches.extend(
1236 benches.extend(
1237 [
1237 [
1238 (makebench(deltaiter), b'cg1 deltaiter()'),
1238 (makebench(deltaiter), b'cg1 deltaiter()'),
1239 (makebench(iterchunks), b'cg1 getchunks()'),
1239 (makebench(iterchunks), b'cg1 getchunks()'),
1240 (makereadnbytes(8192), b'cg1 read(8k)'),
1240 (makereadnbytes(8192), b'cg1 read(8k)'),
1241 (makereadnbytes(16384), b'cg1 read(16k)'),
1241 (makereadnbytes(16384), b'cg1 read(16k)'),
1242 (makereadnbytes(32768), b'cg1 read(32k)'),
1242 (makereadnbytes(32768), b'cg1 read(32k)'),
1243 (makereadnbytes(131072), b'cg1 read(128k)'),
1243 (makereadnbytes(131072), b'cg1 read(128k)'),
1244 ]
1244 ]
1245 )
1245 )
1246 elif isinstance(bundle, bundle2.unbundle20):
1246 elif isinstance(bundle, bundle2.unbundle20):
1247 benches.extend(
1247 benches.extend(
1248 [
1248 [
1249 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1249 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1250 (makebench(iterparts), b'bundle2 iterparts()'),
1250 (makebench(iterparts), b'bundle2 iterparts()'),
1251 (
1251 (
1252 makebench(iterpartsseekable),
1252 makebench(iterpartsseekable),
1253 b'bundle2 iterparts() seekable',
1253 b'bundle2 iterparts() seekable',
1254 ),
1254 ),
1255 (makebench(seek), b'bundle2 part seek()'),
1255 (makebench(seek), b'bundle2 part seek()'),
1256 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1256 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1257 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1257 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1258 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1258 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1259 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1259 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1260 ]
1260 ]
1261 )
1261 )
1262 elif isinstance(bundle, streamclone.streamcloneapplier):
1262 elif isinstance(bundle, streamclone.streamcloneapplier):
1263 raise error.Abort(b'stream clone bundles not supported')
1263 raise error.Abort(b'stream clone bundles not supported')
1264 else:
1264 else:
1265 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1265 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1266
1266
1267 for fn, title in benches:
1267 for fn, title in benches:
1268 timer, fm = gettimer(ui, opts)
1268 timer, fm = gettimer(ui, opts)
1269 timer(fn, title=title)
1269 timer(fn, title=title)
1270 fm.end()
1270 fm.end()
1271
1271
1272
1272
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # drain the chunk generator so the changelog work actually happens
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1308
1308
1309
1309
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the time to compute a `hasdir` answer from a cold cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so file parsing does not pollute the timing
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory structure so the next run recomputes it
        try:
            del dirstate._map._dirs
        except AttributeError:
            # some dirstate-map implementations have no `_dirs` cache
            pass

    timer(d)
    fm.end()
1326
1326
1327
1327
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before timing anything
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            # force a full reload for every timed run
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1390
1390
1391
1391
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate outside of the timed section
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory structure so each run starts cold
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            # some dirstate-map implementations have no `_dirs` cache
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1410
1410
1411
1411
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate/filefoldmap outside of the timed section
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached map so each run rebuilds it from scratch
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1431
1431
1432
1432
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate/dirfoldmap outside of the timed section
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both caches: dirfoldmap is derived from the `_dirs` structure
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            # some dirstate-map implementations have no `_dirs` cache
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1456
1456
1457
1457
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate outside of the timed section
    b"a" in ds

    def setup():
        # mark the dirstate dirty so write() actually writes
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1474
1474
1475
1475
def _getmergerevs(repo, opts):
    """parse command arguments to return revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1497
1497
1498
1498
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the runtime of `merge.calculateupdates`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1530
1530
1531
1531
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1554
1554
1555
1555
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1569
1569
1570
1570
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, also drop and reload the on-disk phase data
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1595
1595
1596
1596
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # fall back for older indexes without a `has_node` method
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE(review): `iteritems` is a py2-era API; presumably `remotephases`
    # is a compat dict providing it — confirm before modernizing.
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1655
1655
1656
1656
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` is a changeset revision: resolve its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node of a manifest revision
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial exposed the manifest revlog directly
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1700
1700
1701
1701
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1714
1714
1715
1715
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop all cached state so `_ignore` is rebuilt from disk
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # property access triggers the ignore-pattern parsing
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1732
1732
1733
1733
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: after _byteskwargs all option keys are bytes; the previous
        # str-key lookup opts['rev'] raised KeyError on Python 3, and the
        # Abort message must be bytes for Mercurial's error machinery
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1796
1796
1797
1797
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # grab the filecache func directly so the benchmark is not polluted by
    # the filecache machinery itself
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # single-element list used to hand the nodemap getter from one closure
    # to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        freshcl = makecl(unfi)
        if util.safehasattr(freshcl.index, 'get_rev'):
            nodeget[0] = freshcl.index.get_rev
        else:
            nodeget[0] = freshcl.nodemap.get

    def d():
        lookup = nodeget[0]
        for node in nodes:
            lookup(node)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1868
1868
1869
1869
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def startup():
        # spawn a fresh hg with an empty HGRCPATH and discard its output
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(startup)
    fm.end()
1886
1886
1887
1887
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for node in nodes:
            repo.changelog.parents(node)

    timer(d)
    fm.end()
1913
1913
1914
1914
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark reading the file list of a single changectx."""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
1926
1926
1927
1927
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list of a changelog revision."""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # entry 3 of a parsed changelog revision is the list of touched files
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
1940
1940
1941
1941
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision symbol through repo.lookup."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        return len(repo.lookup(rev))

    timer(d)
    fm.end()
1948
1948
1949
1949
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a batch of random edits to a linelog."""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run replays the same edit sequence
    random.seed(0)
    randint = random.randint
    nblines = 0
    editargs = []
    for rev in _xrange(edits):
        a1 = randint(0, nblines)
        a2 = randint(a1, min(nblines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        nblines += (b2 - b1) - (a2 - a1)
        editargs.append((rev, a1, a2, b1, b2))

    def applyedits():
        ll = linelog.linelog()
        for args in editargs:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(applyedits)
    fm.end()
1987
1987
1988
1988
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        return len(scmutil.revrange(repo, specs))

    timer(d)
    fm.end()
1996
1996
1997
1997
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking up a node in a freshly-loaded changelog index."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24
    node = scmutil.revsingle(repo, rev).node()

    # newer revlog constructors take a radix, older ones an index file name
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(node)
        clearcaches(cl)

    timer(d)
    fm.end()
2018
2018
2019
2019
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a plain `hg log` run (output is discarded)."""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer the ui so log output does not dominate the measurement
    ui.pushbuffer()

    def d():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(d)
    ui.popbuffer()
    fm.end()
2037
2037
2038
2038
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        cl = repo.changelog
        for rev in cl.revs(start=(len(repo) - 1), stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(moonwalk)
    fm.end()
2055
2055
2056
2056
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui that discards everything
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
2099
2099
2100
2100
2101 def _displaystats(ui, opts, entries, data):
2101 def _displaystats(ui, opts, entries, data):
2102 # use a second formatter because the data are quite different, not sure
2102 # use a second formatter because the data are quite different, not sure
2103 # how it flies with the templater.
2103 # how it flies with the templater.
2104 fm = ui.formatter(b'perf-stats', opts)
2104 fm = ui.formatter(b'perf-stats', opts)
2105 for key, title in entries:
2105 for key, title in entries:
2106 values = data[key]
2106 values = data[key]
2107 nbvalues = len(data)
2107 nbvalues = len(data)
2108 values.sort()
2108 values.sort()
2109 stats = {
2109 stats = {
2110 'key': key,
2110 'key': key,
2111 'title': title,
2111 'title': title,
2112 'nbitems': len(values),
2112 'nbitems': len(values),
2113 'min': values[0][0],
2113 'min': values[0][0],
2114 '10%': values[(nbvalues * 10) // 100][0],
2114 '10%': values[(nbvalues * 10) // 100][0],
2115 '25%': values[(nbvalues * 25) // 100][0],
2115 '25%': values[(nbvalues * 25) // 100][0],
2116 '50%': values[(nbvalues * 50) // 100][0],
2116 '50%': values[(nbvalues * 50) // 100][0],
2117 '75%': values[(nbvalues * 75) // 100][0],
2117 '75%': values[(nbvalues * 75) // 100][0],
2118 '80%': values[(nbvalues * 80) // 100][0],
2118 '80%': values[(nbvalues * 80) // 100][0],
2119 '85%': values[(nbvalues * 85) // 100][0],
2119 '85%': values[(nbvalues * 85) // 100][0],
2120 '90%': values[(nbvalues * 90) // 100][0],
2120 '90%': values[(nbvalues * 90) // 100][0],
2121 '95%': values[(nbvalues * 95) // 100][0],
2121 '95%': values[(nbvalues * 95) // 100][0],
2122 '99%': values[(nbvalues * 99) // 100][0],
2122 '99%': values[(nbvalues * 99) // 100][0],
2123 'max': values[-1][0],
2123 'max': values[-1][0],
2124 }
2124 }
2125 fm.startitem()
2125 fm.startitem()
2126 fm.data(**stats)
2126 fm.data(**stats)
2127 # make node pretty for the human output
2127 # make node pretty for the human output
2128 fm.plain('### %s (%d items)\n' % (title, len(values)))
2128 fm.plain('### %s (%d items)\n' % (title, len(values)))
2129 lines = [
2129 lines = [
2130 'min',
2130 'min',
2131 '10%',
2131 '10%',
2132 '25%',
2132 '25%',
2133 '50%',
2133 '50%',
2134 '75%',
2134 '75%',
2135 '80%',
2135 '80%',
2136 '85%',
2136 '85%',
2137 '90%',
2137 '90%',
2138 '95%',
2138 '95%',
2139 '99%',
2139 '99%',
2140 'max',
2140 'max',
2141 ]
2141 ]
2142 for l in lines:
2142 for l in lines:
2143 fm.plain('%s: %s\n' % (l, stats[l]))
2143 fm.plain('%s: %s\n' % (l, stats[l]))
2144 fm.end()
2144 fm.end()
2145
2145
2146
2146
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the timing/rename columns when --timing is not requested
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # Use native str keys throughout: the dict was previously built
            # with bytes keys (b'base', ...) but read back with str keys
            # (data['p1.nbrevs'], '%(base)12s' % out, fm.data(**data)),
            # which only works on Python 2 where bytes is str. On Python 3
            # the bytes keys caused KeyError/TypeError; str keys behave
            # identically on Python 2.
            data = {
                'base': b.hex(),
                'p1.node': p1.hex(),
                'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                'p1.nbmissingfiles': len(p1missing),
                'p2.node': p2.hex(),
                'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2328
2328
2329
2329
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE: ``revs`` previously defaulted to a mutable ``[]``; ``None`` avoids
    # the shared-mutable-default pitfall and behaves identically here (both
    # are falsy, and ``revs`` is always rebound before use).
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # Pick the table layout up front: the timing columns are only present
    # when ``--timing`` was requested.
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # Only merge revisions are of interest: they are the ones copy tracing
    # has to reconcile against a common ancestor base.
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no files to trace between this base/parent pair
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2468
2468
2469
2469
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """Benchmark construction of a case-collision auditor over the dirstate."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build_auditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2476
2476
2477
2477
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """Benchmark loading the fncache file from the store."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    timer(lambda: store.fncache._load())
    fm.end()
2489
2489
2490
2490
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """Benchmark writing the fncache file.

    The fncache is marked dirty and rewritten inside a throwaway transaction
    (the original content is protected by ``tr.addbackup``).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # FIX: the original released the lock only on the success path; if the
    # benchmark raised, the repository lock (and transaction) leaked. The
    # try/finally guarantees the lock is always released.
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # force a rewrite on every run, even though nothing changed
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2509
2509
2510
2510
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """Benchmark encoding every path currently tracked by the fncache."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encode_all():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encode_all)
    fm.end()
2524
2524
2525
2525
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for threaded perfbdiff runs: drain diff jobs from ``q``
    # until a ``None`` sentinel arrives, then park on the ``ready`` condition
    # until the coordinator wakes all workers for the next round (or sets
    # ``done`` to shut them down).
    while not done.is_set():
        item = q.get()
        while item is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*item)
            elif blocks:
                mdiff.bdiff.blocks(*item)
            else:
                mdiff.textdiff(*item)
            q.task_done()
            item = q.get()
        # account for the sentinel itself so q.join() can complete
        q.task_done()
        with ready:
            ready.wait()
2541
2541
2542
2542
def _manifestrevision(repo, mnode):
    """Return the raw revision text for manifest node ``mnode``."""
    ml = repo.manifestlog
    # Modern Mercurial exposes getstorage(); older versions only have
    # the private _revlog attribute.
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2552
2552
2553
2553
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # When diffing the changelog/manifest, the first positional argument is
    # actually the revision, not a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    pairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Collect manifest texts: the changeset's manifest against each
            # of its parents' manifests.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((pman, mtext))

            # Collect filelog texts for every file touched, discovered by
            # walking the manifest delta against p1.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                pairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def runner():
            for pair in pairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        # prime the queue with one sentinel per worker so the first
        # q.join() below synchronizes thread startup
        for _i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for _i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def runner():
            for pair in pairs:
                q.put(pair)
            for _i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(runner)
    fm.end()

    if withthreads:
        # tell the workers to exit their outer loop and wake them up
        done.set()
        for _i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2668
2668
2669
2669
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # bundle holds [changegroup-or-bundle, transaction]; a list so the
            # setup/apply closures can mutate it in place between runs.
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from the previous
                        # run and re-read the bundle from the start
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # FIX: this was ``repo.ui.quiet == orig_quiet`` — a no-op
                # comparison that never restored the original quiet setting.
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
2724
2749
2725
2750
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # For changelog/manifest diffs the first positional argument is the
    # revision rather than a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # manifest texts: the changeset against each of its parents
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((pman, mtext))

            # filelog texts for all touched files, discovered by walking the
            # manifest delta against p1
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                pairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))

    def rundiff():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(rundiff)
    fm.end()
2804
2829
2805
2830
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # Benchmark every combination of whitespace-related diff options.
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {options[flag]: b'1' for flag in diffopt}

        def run(diffargs=diffargs):
            # buffer the output so we time the diff, not the terminal
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        label = diffopt.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(run, title=title)
    fm.end()
2829
2854
2830
2855
2831 @command(
2856 @command(
2832 b'perf::revlogindex|perfrevlogindex',
2857 b'perf::revlogindex|perfrevlogindex',
2833 revlogopts + formatteropts,
2858 revlogopts + formatteropts,
2834 b'-c|-m|FILE',
2859 b'-c|-m|FILE',
2835 )
2860 )
2836 def perfrevlogindex(ui, repo, file_=None, **opts):
2861 def perfrevlogindex(ui, repo, file_=None, **opts):
2837 """Benchmark operations against a revlog index.
2862 """Benchmark operations against a revlog index.
2838
2863
2839 This tests constructing a revlog instance, reading index data,
2864 This tests constructing a revlog instance, reading index data,
2840 parsing index data, and performing various operations related to
2865 parsing index data, and performing various operations related to
2841 index data.
2866 index data.
2842 """
2867 """
2843
2868
2844 opts = _byteskwargs(opts)
2869 opts = _byteskwargs(opts)
2845
2870
2846 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2871 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2847
2872
2848 opener = getattr(rl, 'opener') # trick linter
2873 opener = getattr(rl, 'opener') # trick linter
2849 # compat with hg <= 5.8
2874 # compat with hg <= 5.8
2850 radix = getattr(rl, 'radix', None)
2875 radix = getattr(rl, 'radix', None)
2851 indexfile = getattr(rl, '_indexfile', None)
2876 indexfile = getattr(rl, '_indexfile', None)
2852 if indexfile is None:
2877 if indexfile is None:
2853 # compatibility with <= hg-5.8
2878 # compatibility with <= hg-5.8
2854 indexfile = getattr(rl, 'indexfile')
2879 indexfile = getattr(rl, 'indexfile')
2855 data = opener.read(indexfile)
2880 data = opener.read(indexfile)
2856
2881
2857 header = struct.unpack(b'>I', data[0:4])[0]
2882 header = struct.unpack(b'>I', data[0:4])[0]
2858 version = header & 0xFFFF
2883 version = header & 0xFFFF
2859 if version == 1:
2884 if version == 1:
2860 inline = header & (1 << 16)
2885 inline = header & (1 << 16)
2861 else:
2886 else:
2862 raise error.Abort(b'unsupported revlog version: %d' % version)
2887 raise error.Abort(b'unsupported revlog version: %d' % version)
2863
2888
2864 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2889 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2865 if parse_index_v1 is None:
2890 if parse_index_v1 is None:
2866 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2891 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2867
2892
2868 rllen = len(rl)
2893 rllen = len(rl)
2869
2894
2870 node0 = rl.node(0)
2895 node0 = rl.node(0)
2871 node25 = rl.node(rllen // 4)
2896 node25 = rl.node(rllen // 4)
2872 node50 = rl.node(rllen // 2)
2897 node50 = rl.node(rllen // 2)
2873 node75 = rl.node(rllen // 4 * 3)
2898 node75 = rl.node(rllen // 4 * 3)
2874 node100 = rl.node(rllen - 1)
2899 node100 = rl.node(rllen - 1)
2875
2900
2876 allrevs = range(rllen)
2901 allrevs = range(rllen)
2877 allrevsrev = list(reversed(allrevs))
2902 allrevsrev = list(reversed(allrevs))
2878 allnodes = [rl.node(rev) for rev in range(rllen)]
2903 allnodes = [rl.node(rev) for rev in range(rllen)]
2879 allnodesrev = list(reversed(allnodes))
2904 allnodesrev = list(reversed(allnodes))
2880
2905
2881 def constructor():
2906 def constructor():
2882 if radix is not None:
2907 if radix is not None:
2883 revlog(opener, radix=radix)
2908 revlog(opener, radix=radix)
2884 else:
2909 else:
2885 # hg <= 5.8
2910 # hg <= 5.8
2886 revlog(opener, indexfile=indexfile)
2911 revlog(opener, indexfile=indexfile)
2887
2912
2888 def read():
2913 def read():
2889 with opener(indexfile) as fh:
2914 with opener(indexfile) as fh:
2890 fh.read()
2915 fh.read()
2891
2916
2892 def parseindex():
2917 def parseindex():
2893 parse_index_v1(data, inline)
2918 parse_index_v1(data, inline)
2894
2919
2895 def getentry(revornode):
2920 def getentry(revornode):
2896 index = parse_index_v1(data, inline)[0]
2921 index = parse_index_v1(data, inline)[0]
2897 index[revornode]
2922 index[revornode]
2898
2923
2899 def getentries(revs, count=1):
2924 def getentries(revs, count=1):
2900 index = parse_index_v1(data, inline)[0]
2925 index = parse_index_v1(data, inline)[0]
2901
2926
2902 for i in range(count):
2927 for i in range(count):
2903 for rev in revs:
2928 for rev in revs:
2904 index[rev]
2929 index[rev]
2905
2930
2906 def resolvenode(node):
2931 def resolvenode(node):
2907 index = parse_index_v1(data, inline)[0]
2932 index = parse_index_v1(data, inline)[0]
2908 rev = getattr(index, 'rev', None)
2933 rev = getattr(index, 'rev', None)
2909 if rev is None:
2934 if rev is None:
2910 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2935 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2911 # This only works for the C code.
2936 # This only works for the C code.
2912 if nodemap is None:
2937 if nodemap is None:
2913 return
2938 return
2914 rev = nodemap.__getitem__
2939 rev = nodemap.__getitem__
2915
2940
2916 try:
2941 try:
2917 rev(node)
2942 rev(node)
2918 except error.RevlogError:
2943 except error.RevlogError:
2919 pass
2944 pass
2920
2945
2921 def resolvenodes(nodes, count=1):
2946 def resolvenodes(nodes, count=1):
2922 index = parse_index_v1(data, inline)[0]
2947 index = parse_index_v1(data, inline)[0]
2923 rev = getattr(index, 'rev', None)
2948 rev = getattr(index, 'rev', None)
2924 if rev is None:
2949 if rev is None:
2925 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2950 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2926 # This only works for the C code.
2951 # This only works for the C code.
2927 if nodemap is None:
2952 if nodemap is None:
2928 return
2953 return
2929 rev = nodemap.__getitem__
2954 rev = nodemap.__getitem__
2930
2955
2931 for i in range(count):
2956 for i in range(count):
2932 for node in nodes:
2957 for node in nodes:
2933 try:
2958 try:
2934 rev(node)
2959 rev(node)
2935 except error.RevlogError:
2960 except error.RevlogError:
2936 pass
2961 pass
2937
2962
2938 benches = [
2963 benches = [
2939 (constructor, b'revlog constructor'),
2964 (constructor, b'revlog constructor'),
2940 (read, b'read'),
2965 (read, b'read'),
2941 (parseindex, b'create index object'),
2966 (parseindex, b'create index object'),
2942 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2967 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2943 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2968 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2944 (lambda: resolvenode(node0), b'look up node at rev 0'),
2969 (lambda: resolvenode(node0), b'look up node at rev 0'),
2945 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2970 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2946 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2971 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2947 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2972 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2948 (lambda: resolvenode(node100), b'look up node at tip'),
2973 (lambda: resolvenode(node100), b'look up node at tip'),
2949 # 2x variation is to measure caching impact.
2974 # 2x variation is to measure caching impact.
2950 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2975 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2951 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2976 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2952 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2977 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2953 (
2978 (
2954 lambda: resolvenodes(allnodesrev, 2),
2979 lambda: resolvenodes(allnodesrev, 2),
2955 b'look up all nodes 2x (reverse)',
2980 b'look up all nodes 2x (reverse)',
2956 ),
2981 ),
2957 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2982 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2958 (
2983 (
2959 lambda: getentries(allrevs, 2),
2984 lambda: getentries(allrevs, 2),
2960 b'retrieve all index entries 2x (forward)',
2985 b'retrieve all index entries 2x (forward)',
2961 ),
2986 ),
2962 (
2987 (
2963 lambda: getentries(allrevsrev),
2988 lambda: getentries(allrevsrev),
2964 b'retrieve all index entries (reverse)',
2989 b'retrieve all index entries (reverse)',
2965 ),
2990 ),
2966 (
2991 (
2967 lambda: getentries(allrevsrev, 2),
2992 lambda: getentries(allrevsrev, 2),
2968 b'retrieve all index entries 2x (reverse)',
2993 b'retrieve all index entries 2x (reverse)',
2969 ),
2994 ),
2970 ]
2995 ]
2971
2996
2972 for fn, title in benches:
2997 for fn, title in benches:
2973 timer, fm = gettimer(ui, opts)
2998 timer, fm = gettimer(ui, opts)
2974 timer(fn, title=title)
2999 timer(fn, title=title)
2975 fm.end()
3000 fm.end()
2976
3001
2977
3002
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the tip
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # drop cached state so every timed run pays the full read cost
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk from the tip back down to (but excluding) startrev - 1
            first, last = rllen - 1, startrev - 1
            step = -1 * step
        else:
            first, last = startrev, rllen

        for pos in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(pos))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3026
3051
3027
3052
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the tip
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed typo in the user-facing message ('invalide' -> 'invalid')
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        # each pass replays the same revision range into a scratch revlog
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-from-pass-1, timing-from-pass-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the median entry was previously computed with
        # ``resultcount * 70 // 100`` (the 70th percentile) while being
        # labelled "50%"; use the actual 50th percentile index.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3169
3194
3170
3195
3171 class _faketr:
3196 class _faketr:
3172 def add(s, x, y, z=None):
3197 def add(s, x, y, z=None):
3173 return None
3198 return None
3174
3199
3175
3200
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions of ``orig`` into a scratch revlog, timing each write.

    Revisions in ``orig.revs(startrev, stoprev)`` are re-added one by one
    (payload built according to ``source``) and the duration of every
    ``addrawrevision`` call is recorded. Returns a list of ``(rev, timing)``
    pairs.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            # the bound methods already have the right signatures
            updateprogress = progress.update
            completeprogress = progress.complete
        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for pos, rev in enumerate(revs):
            updateprogress(pos)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # make each addition pay the full cache-miss cost
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3225
3250
3226
3251
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair fed to ``addrawrevision`` for ``rev``.

    ``source`` selects how the revision payload is expressed: as a full
    text (``full``), as a cached delta against one of the parents
    (``parent-1``/``parent-2``/``parent-smallest``), or as the delta
    already stored in the revlog (``storage``).
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second parent
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        base = p1
        delta = orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdelta = orig.revdiff(p2, rev)
            if len(delta) > len(otherdelta):
                base = p2
                delta = otherdelta
        cachedelta = (orig.rev(base), delta)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3267
3292
3268
3293
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary copy of ``orig`` truncated before ``truncaterev``.

    The index and data files are copied into a throw-away directory,
    truncated so that revisions >= ``truncaterev`` are missing, and a new
    revlog is opened on the copy. The temporary directory is removed on
    exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern (hg > 5.8) revlogs take a radix
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older revlogs want explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3329
3354
3330
3355
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    # a dummy compression probes that the engine really works
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        """Open a raw file handle on the revlog's storage file."""
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # BUG FIX: both getattr arguments previously named 'datafile',
            # so the modern '_datafile' attribute (hg > 5.8) was never
            # consulted; mirror the pattern used by _temprevlog.
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        # one segment read per revision, cold caches
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread, but reusing a single file descriptor
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read spanning every revision
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3463
3488
3464
3489
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m the positional FILE argument is actually the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice each on-disk segment back into per-revision compressed
        # chunks, mirroring what revlog does internally.
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # older versions expose the entry size on the I/O object
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # One closure per phase; each drops the revlog caches first unless
    # --cache was requested.
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        # NOTE(review): iterates the enclosing ``slicedchain`` rather than
        # the ``chain`` argument — kept as-is to preserve behavior.
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions keep the helper on the revlog module itself
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute every intermediate result so each phase can be timed in
    # isolation.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # chain slicing only exists on sparse-read capable revlogs
    if getattr(r, '_withsparseread', False):
        benches.append(
            (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        )

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3609
3634
3610
3635
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold filtered and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        # drop volatile (filtering/obsolescence) caches so each run is cold
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a changectx for every matched revision
            for ctx in repo.set(expr):
                pass
        else:
            # iterate plain revision numbers only
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3642
3667
3643
3668
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # volatile sets are computed on the unfiltered repository
    repo = repo.unfiltered()

    def getobs(name):
        # benchmark closure recomputing the obsolescence set ``name``
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return run

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # benchmark closure recomputing the filtered-revs set ``name``
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return run

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
3691
3716
3692
3717
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def bench():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap, including subsets
                view._branchcaches.clear()
            else:
                # only drop the entry for this specific filter level
                filtered.pop(filtername, None)
            view.branchmap()

        return bench

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so subsets are
        # always benchmarked before their supersets
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads and writes for the duration of the run
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            title = b'unfiltered' if name is None else name
            timer(getbranchmap(name), title=title)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3782
3807
3783
3808
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # temporary repoview filters exposing exactly the base/target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found, build the base branchmap from
            # scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3892
3917
3893
3918
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # only enumerate the on-disk branchmap cache files and exit
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # fall back to the nearest cached subset filter
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3952
3977
3953
3978
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # len() forces a full parse of the on-disk obsstore
        return len(obsolete.obsstore(repo, svfs))

    timer(loadmarkers)
    fm.end()
3963
3988
3964
3989
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark the util.lrucachedict implementation

    Times cache construction, pure lookups, pure insertions and a randomly
    mixed get/set workload.  When --costlimit is non-zero the cost-aware
    variants of the benchmarks run instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # Measure the cost of constructing empty caches.
        for i in _xrange(10000):
            util.lrucachedict(size)

    # Pool of possible per-item costs used by the cost-aware benchmarks.
    costrange = list(range(mincost, maxcost + 1))

    # Random values used both as keys and values to pre-populate the cache.
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # NOTE: 'costs' is assigned below this definition; the closure
        # resolves it lazily, when timer() eventually calls us.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # a cost-limited cache may already have evicted the key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # a lookup
        else:
            op = 1  # an insertion
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # The cost-aware and plain benchmark sets are mutually exclusive:
    # a zero cost limit means the plain (uncosted) variants run.
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    # One timer/formatter pair per benchmark so each reports separately.
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4119
4144
4120
4145
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # Resolve the ui method to benchmark (write, write_err, ...).
    write = getattr(ui, _sysstr(opts[b'write_method']))
    line_count = int(opts[b'nlines'])
    items_per_line = int(opts[b'nitems'])
    item = opts[b'item']
    whole_line = opts.get(b'batch_line')
    flush_each_line = opts.get(b'flush_line')

    if whole_line:
        # Pre-build the full line once so only the write call is timed.
        full_line = item * items_per_line + b'\n'

    def benchmark():
        for _line in pycompat.xrange(line_count):
            if whole_line:
                write(full_line)
            else:
                for _item in pycompat.xrange(items_per_line):
                    write(item)
                write(b'\n')
            if flush_each_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4162
4187
4163
4188
def uisetup(ui):
    """extension setup hook: install compatibility shims for old Mercurial"""
    # Guard clause, preserving the original short-circuit evaluation order.
    if not (
        util.safehasattr(cmdutil, b'openrevlog')
        and not util.safehasattr(commands, b'debugrevlogopts')
    ):
        return

    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4182
4207
4183
4208
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench_progress():
        # Drive a progress bar from 0 to total, one increment at a time.
        progress = ui.makeprogress(topic, total=total)
        with progress:
            for _step in _xrange(total):
                progress.increment()

    timer(bench_progress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now