##// END OF EJS Templates
path: pass `path` to `peer` in `hg perf::discovery`...
marmoute -
r50632:4cedae99 default
parent child Browse files
Show More
@@ -1,4234 +1,4239 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
try:
    from mercurial.revlogutils import constants as revlog_constants

    # modern hg: revlog() takes a (kind, display-name) pair
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        """Open a revlog on modern Mercurial (kind argument required)."""
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        """Open a revlog on older Mercurial (no kind argument)."""
        return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged (no-op stand-in for missing pycompat helpers)."""
    return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
# Sentinel used to detect a missing attribute without catching exceptions.
_undefined = object()


def safehasattr(thing, attr):
    """hasattr() variant taking a bytes attribute name (portability shim)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


# for "historical portability":
# util.safehasattr has only been available since 1.9.3 (94b200a11cf7)
setattr(util, 'safehasattr', safehasattr)
197
197
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a str on Python 3, so this bytes comparison
    # never matches there; harmless in practice because perf_counter exists
    # on all Python 3 — confirm intent for Python 2 on Windows.
    util.timer = time.clock
else:
    util.timer = time.time
207
207
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# command table populated by the @command decorator defined below
cmdtable = {}
237
237
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its list of names."""
    return cmd.split(b"|")
243
243
244
244
if safehasattr(registrar, 'command'):
    # modern path: registrar.command (3.7+)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because the "norepo" option has
        # only been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define the "@command" annotation locally, because cmdutil.command
    # has only been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
276
276
277
277
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    _dyndefault = mercurial.configitems.dynamicdefault
    # (name, experimental-flag) pairs, registered in historical order
    for _name, _experimental in [
        (b'presleep', True),
        (b'stub', True),
        (b'parentscount', True),
        (b'all-timing', True),
        (b'pre-run', False),
        (b'profile-benchmark', False),
        (b'run-limits', True),
    ]:
        if _experimental:
            configitem(
                b'perf',
                _name,
                default=_dyndefault,
                experimental=True,
            )
        else:
            configitem(b'perf', _name, default=_dyndefault)
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2 — configitem() does not accept the `experimental`
    # keyword there, so re-register every item without it.
    for _name in [
        b'presleep',
        b'stub',
        b'parentscount',
        b'all-timing',
        b'pre-run',
        b'profile-benchmark',
        b'run-limits',
    ]:
        configitem(
            b'perf',
            _name,
            default=mercurial.configitems.dynamicdefault,
        )
364
364
365
365
def getlen(ui):
    """Return a length function; with perf.stub set, always report 1."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
370
370
371
371
class noop:
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


# Shared do-nothing context, used where profiling is disabled.
NOOPCTX = noop()
383
383
384
384
def gettimer(ui, opts=None):
    """Return a ``(timer, formatter)`` pair for a perf command.

    Centralizes formatter creation — and the run policy (pre-sleep, stub
    mode, run limits, optional profiling) — instead of duplicating it in
    every performance command.
    """

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    formatter_factory = getattr(ui, 'formatter', None)
    if formatter_factory:
        fm = formatter_factory(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                self.hexfunc = node.hex if ui.debugflag else node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    def _parse_limit(item):
        # one entry has the form b'<seconds>-<minimum-run-count>';
        # malformed entries are warned about and dropped (return None)
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn(
                (b'malformatted run limit entry, missing "-": %s\n' % item)
            )
            return None
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            return None
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            return None
        return (time_limit, run_limit)

    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = [l for l in map(_parse_limit, limitspec) if l is not None]
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    profiler = None
    if profiling is not None and ui.configbool(
        b"perf", b"profile-benchmark", False
    ):
        profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
507
507
508
508
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after optional *setup*); used by perf.stub."""
    if setup is not None:
        setup()
    func()
513
513
514
514
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) tuple on exit.

    Wall time comes from util.timer; user/sys CPU time from os.times().
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    r.append((cstop - cstart, ostop[0] - ostart[0], ostop[1] - ostart[1]))
525
525
526
526
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)


def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func* repeatedly, reporting timings through formatone().

    ``setup`` (if given) runs before every iteration; ``prerun`` warm-up
    iterations run unmeasured; ``profiler`` wraps only the first measured
    iteration; ``limits`` gives (elapsed-seconds, min-run-count) stop rules.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs, never measured
    for _ in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        # Look for a stop condition.
        elapsed = util.timer() - begin
        for limit_time, mincount in limits:
            if elapsed >= limit_time and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
573
573
574
574
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in place.
    Only the best run is shown unless *displayall* is set, in which case
    max, average, and median are reported too.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-"best" rows get a "<role>." field-name prefix
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        display(b'median', timings[count // 2])
608
608
609
609
610 # utilities for historical portability
610 # utilities for historical portability
611
611
612
612
def getint(ui, section, name, default):
    """Read config value ``section.name`` as an int, or *default* if unset.

    Reads through ``ui.config`` rather than ``ui.configint`` for
    "historical portability": ``ui.configint`` has only been available
    since 1.9 (or fa2b596db182).
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
625
625
626
626
def safeattrsetter(obj, name, ignoremissing=False):
    """Return a set/restore helper for attribute *name* of *obj*.

    Aborts when 'obj' lacks the 'name' attribute at runtime (unless
    *ignoremissing* is true, in which case None is returned).  Failing
    loudly here keeps a future removal of the attribute from silently
    invalidating the assumptions a benchmark relies on.

    The returned object exposes ``set(newvalue)`` to assign a new value
    and ``restore()`` to put the original value back.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        )
        raise error.Abort(msg % (name, obj))

    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663
663
664
664
665 # utilities to examine each internal API changes
665 # utilities to examine each internal API changes
666
666
667
667
def getbranchmapsubsettable():
    """Locate the branch-cache ``subsettable`` across Mercurial versions.

    For "historical portability", the table moved over time:
      - branchmap since 2.9 (or 175c6fd8cacc)
      - repoview since 2.5 (or 59a9f18d4587)
      - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but the subsettable
    # attribute doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686
686
687
687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability": repo.svfs has been available since
    # 2.3 (or 7034365089bf); older repos expose 'sopener' instead
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
697
697
698
698
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability": repo.vfs has been available since
    # 2.3 (or 7034365089bf); older repos expose 'opener' instead
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
708
708
709
709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    The checks run from newest to oldest internal API, so the first match
    wins; their order must not be changed.
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                # drop the cached property value directly from __dict__ so
                # the next access recomputes it
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738
738
739
739
740 # utilities to clear cache
740 # utilities to clear cache
741
741
742
742
def clearfilecache(obj, attrname):
    """Drop the cached value of filecache-backed property *attrname*.

    Operates on the unfiltered repository when *obj* supports filtering,
    since that is where filecache entries live.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750
750
751
751
def clearchangelog(repo):
    """Force the changelog to be re-read from disk on next access."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # filtered views keep their own cached changelog key/value; use
        # object.__setattr__ to bypass any property machinery
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
757
757
758
758
759 # perf commands
759 # perf commands
760
760
761
761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark walking the working directory via dirstate.walk
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walk_all():
        walker = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walker))

    timer(walk_all)
    fm.end()
775
775
776
776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file ``f`` at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
784
784
785
785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # bypass repo.status() and time the low-level dirstate call itself
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # force consumption of the result without the cost of len() on
            # each sub-list
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
822
822
823
823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove over the working directory
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Bind the saved value *before* entering the try block: if this
    # assignment were the first statement inside ``try`` and it raised,
    # the ``finally`` clause would hit a NameError on ``oldquiet`` and
    # mask the original exception.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # scmutil.addremove grew a uipathfn argument in 5.0; pass it
            # when this Mercurial supports it
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841
841
842
842
def clearcaches(cl):
    """Clear changelog caches, coping with internal API changes."""
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # hg <= 5.2 kept an explicit node->rev mapping on the changelog
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853
853
854
854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start each run with cold changelog caches
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
870
870
871
871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    # benchmark computing the repository's tags
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        # optionally drop changelog/manifest so their parsing is part of
        # the measurement, then always invalidate the tags cache
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
896
896
897
897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark a full iteration over the ancestors of all heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
910
910
911
911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of REVSET members against a lazy
    # ancestor set of all heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(run)
    fm.end()
926
926
927
927
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # with one positional argument it is the revision (and -c/-m select the
    # revlog); with two, the first names the file revlog explicitly
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo a delta search would see for this revision
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
991
991
992
992
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve PATH with whichever API this Mercurial provides, newest first
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            # pre-urlutil versions
            path = ui.expandpath(path)

    def s():
        # recreate the peer before each run so discovery starts cold;
        # `path` (possibly a path object) is handed to hg.peer as-is
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1014
1019
1015
1020
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached parse so each run re-reads the bookmarks file
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1040
1045
1041
1046
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # for "historical portability": parsebundlespec moved from exchange to
    # bundlecaches
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # message fixed from the ungrammatical "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # compute the outgoing set the bundle will contain
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: only bundle *generation* is being measured
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1145
1150
1146
1151
1147 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1152 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    # Imported lazily so the extension still loads on Mercurial versions
    # where these modules may differ.
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # Build a benchmark closure that re-opens and re-parses the bundle
        # on every run, then hands it to `fn`.
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # Benchmark draining the parsed bundle in `size`-byte reads.
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # Baseline: raw file reads with no bundle parsing at all.
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # Drain each bundle2 part in `size`-byte reads.
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # Open the bundle once just to detect its type and pick which benches
    # apply; the benchmark closures each re-open the file themselves.
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1271
1276
1272
1277
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # Consume the chunk generator fully so generation work is timed.
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1308
1313
1309
1314
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # Benchmark `dirstate.hasdir` with a cold `_dirs` cache on each run.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # Prime the dirstate itself so only the `_dirs` computation is timed.
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # Drop the cached `_dirs` structure so the next run recomputes it;
        # older Mercurial versions may not expose it, hence the guard.
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()
1326
1331
1327
1332
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Prime the dirstate before any mode-specific setup.
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            # Force a full reload so each run measures loading from scratch.
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1390
1395
1391
1396
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Prime the dirstate so only the `_dirs` rebuild is measured per run.
    repo.dirstate.hasdir(b"a")

    def setup():
        # Drop the cached `_dirs`; guard for versions without the attribute.
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1410
1415
1411
1416
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # Prime the dirstate/map so only the foldmap rebuild is measured.
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1431
1436
1432
1437
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # Prime the caches so the first timed run is comparable to the rest.
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # dirfoldmap is derived from `_dirs`, so drop both; `_dirs` may not
        # exist on every Mercurial version, hence the guard.
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1456
1461
1457
1462
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # Prime the dirstate so the write is not dominated by the initial load.
    b"a" in ds

    def setup():
        # Mark dirty so `write()` actually flushes instead of short-circuiting.
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1474
1479
1475
1480
def _getmergerevs(repo, opts):
    """parse command arguments to return revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1497
1502
1498
1503
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    # Benchmark `merge.calculateupdates` between the revs selected by
    # --rev/--from/--base (see _getmergerevs).
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1530
1535
1531
1536
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Resolve the three contexts involved in the merge once, up front.
    localctx, otherctx, basectx = _getmergerevs(repo, opts)

    def runone():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, localctx, otherctx, basectx)

    timer(runone)
    fm.end()
1554
1559
1555
1560
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Resolve both endpoints once so only pathcopies itself is timed.
    srcctx = scmutil.revsingle(repo, rev1, rev1)
    dstctx = scmutil.revsingle(repo, rev2, rev2)

    def runone():
        copies.pathcopies(srcctx, dstctx)

    timer(runone)
    fm.end()
1569
1574
1570
1575
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # Also drop the filecache entry so on-disk reading is included.
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1595
1600
1596
1601
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # Newer Mercurial exposes a `main_path`/push-variant API; fall back to
    # the older `pushloc` attribute otherwise.
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # `has_node` only exists on newer index implementations; fall back to
    # the legacy nodemap membership test.
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1659
1664
1660
1665
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` names a changeset; take its manifest node.
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # Full hex node of a manifest revision.
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # `getstorage` only exists on newer manifestlog APIs.
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1704
1709
1705
1710
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # Benchmark reading a single changelog entry by node.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def runone():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(runone)
    fm.end()
1718
1723
1719
1724
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate

    def resetignore():
        # Drop both the dirstate content and the cached ignore matcher so
        # every run rebuilds the matcher from scratch.
        ds.invalidate()
        clearfilecache(ds, b'_ignore')

    def loadignore():
        ds._ignore

    timer(loadignore, setup=resetignore, title=b"load")
    fm.end()
1736
1741
1737
1742
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1800
1805
1801
1806
1802 @command(
1807 @command(
1803 b'perf::nodemap|perfnodemap',
1808 b'perf::nodemap|perfnodemap',
1804 [
1809 [
1805 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1810 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1806 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1811 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1807 ]
1812 ]
1808 + formatteropts,
1813 + formatteropts,
1809 )
1814 )
1810 def perfnodemap(ui, repo, **opts):
1815 def perfnodemap(ui, repo, **opts):
1811 """benchmark the time necessary to look up revision from a cold nodemap
1816 """benchmark the time necessary to look up revision from a cold nodemap
1812
1817
1813 Depending on the implementation, the amount and order of revision we look
1818 Depending on the implementation, the amount and order of revision we look
1814 up can varies. Example of useful set to test:
1819 up can varies. Example of useful set to test:
1815 * tip
1820 * tip
1816 * 0
1821 * 0
1817 * -10:
1822 * -10:
1818 * :10
1823 * :10
1819 * -10: + :10
1824 * -10: + :10
1820 * :10: + -10:
1825 * :10: + -10:
1821 * -10000:
1826 * -10000:
1822 * -10000: + 0
1827 * -10000: + 0
1823
1828
1824 The command currently focus on valid binary lookup. Benchmarking for
1829 The command currently focus on valid binary lookup. Benchmarking for
1825 hexlookup, prefix lookup and missing lookup would also be valuable.
1830 hexlookup, prefix lookup and missing lookup would also be valuable.
1826 """
1831 """
1827 import mercurial.revlog
1832 import mercurial.revlog
1828
1833
1829 opts = _byteskwargs(opts)
1834 opts = _byteskwargs(opts)
1830 timer, fm = gettimer(ui, opts)
1835 timer, fm = gettimer(ui, opts)
1831 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1836 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1832
1837
1833 unfi = repo.unfiltered()
1838 unfi = repo.unfiltered()
1834 clearcaches = opts[b'clear_caches']
1839 clearcaches = opts[b'clear_caches']
1835 # find the filecache func directly
1840 # find the filecache func directly
1836 # This avoid polluting the benchmark with the filecache logic
1841 # This avoid polluting the benchmark with the filecache logic
1837 makecl = unfi.__class__.changelog.func
1842 makecl = unfi.__class__.changelog.func
1838 if not opts[b'rev']:
1843 if not opts[b'rev']:
1839 raise error.Abort(b'use --rev to specify revisions to look up')
1844 raise error.Abort(b'use --rev to specify revisions to look up')
1840 revs = scmutil.revrange(repo, opts[b'rev'])
1845 revs = scmutil.revrange(repo, opts[b'rev'])
1841 cl = repo.changelog
1846 cl = repo.changelog
1842 nodes = [cl.node(r) for r in revs]
1847 nodes = [cl.node(r) for r in revs]
1843
1848
1844 # use a list to pass reference to a nodemap from one closure to the next
1849 # use a list to pass reference to a nodemap from one closure to the next
1845 nodeget = [None]
1850 nodeget = [None]
1846
1851
1847 def setnodeget():
1852 def setnodeget():
1848 # probably not necessary, but for good measure
1853 # probably not necessary, but for good measure
1849 clearchangelog(unfi)
1854 clearchangelog(unfi)
1850 cl = makecl(unfi)
1855 cl = makecl(unfi)
1851 if util.safehasattr(cl.index, 'get_rev'):
1856 if util.safehasattr(cl.index, 'get_rev'):
1852 nodeget[0] = cl.index.get_rev
1857 nodeget[0] = cl.index.get_rev
1853 else:
1858 else:
1854 nodeget[0] = cl.nodemap.get
1859 nodeget[0] = cl.nodemap.get
1855
1860
1856 def d():
1861 def d():
1857 get = nodeget[0]
1862 get = nodeget[0]
1858 for n in nodes:
1863 for n in nodes:
1859 get(n)
1864 get(n)
1860
1865
1861 setup = None
1866 setup = None
1862 if clearcaches:
1867 if clearcaches:
1863
1868
1864 def setup():
1869 def setup():
1865 setnodeget()
1870 setnodeget()
1866
1871
1867 else:
1872 else:
1868 setnodeget()
1873 setnodeget()
1869 d() # prewarm the data structure
1874 d() # prewarm the data structure
1870 timer(d, setup=setup)
1875 timer(d, setup=setup)
1871 fm.end()
1876 fm.end()
1872
1877
1873
1878
1874 @command(b'perf::startup|perfstartup', formatteropts)
1879 @command(b'perf::startup|perfstartup', formatteropts)
1875 def perfstartup(ui, repo, **opts):
1880 def perfstartup(ui, repo, **opts):
1876 opts = _byteskwargs(opts)
1881 opts = _byteskwargs(opts)
1877 timer, fm = gettimer(ui, opts)
1882 timer, fm = gettimer(ui, opts)
1878
1883
1879 def d():
1884 def d():
1880 if os.name != 'nt':
1885 if os.name != 'nt':
1881 os.system(
1886 os.system(
1882 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1887 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1883 )
1888 )
1884 else:
1889 else:
1885 os.environ['HGRCPATH'] = r' '
1890 os.environ['HGRCPATH'] = r' '
1886 os.system("%s version -q > NUL" % sys.argv[0])
1891 os.system("%s version -q > NUL" % sys.argv[0])
1887
1892
1888 timer(d)
1893 timer(d)
1889 fm.end()
1894 fm.end()
1890
1895
1891
1896
1892 @command(b'perf::parents|perfparents', formatteropts)
1897 @command(b'perf::parents|perfparents', formatteropts)
1893 def perfparents(ui, repo, **opts):
1898 def perfparents(ui, repo, **opts):
1894 """benchmark the time necessary to fetch one changeset's parents.
1899 """benchmark the time necessary to fetch one changeset's parents.
1895
1900
1896 The fetch is done using the `node identifier`, traversing all object layers
1901 The fetch is done using the `node identifier`, traversing all object layers
1897 from the repository object. The first N revisions will be used for this
1902 from the repository object. The first N revisions will be used for this
1898 benchmark. N is controlled by the ``perf.parentscount`` config option
1903 benchmark. N is controlled by the ``perf.parentscount`` config option
1899 (default: 1000).
1904 (default: 1000).
1900 """
1905 """
1901 opts = _byteskwargs(opts)
1906 opts = _byteskwargs(opts)
1902 timer, fm = gettimer(ui, opts)
1907 timer, fm = gettimer(ui, opts)
1903 # control the number of commits perfparents iterates over
1908 # control the number of commits perfparents iterates over
1904 # experimental config: perf.parentscount
1909 # experimental config: perf.parentscount
1905 count = getint(ui, b"perf", b"parentscount", 1000)
1910 count = getint(ui, b"perf", b"parentscount", 1000)
1906 if len(repo.changelog) < count:
1911 if len(repo.changelog) < count:
1907 raise error.Abort(b"repo needs %d commits for this test" % count)
1912 raise error.Abort(b"repo needs %d commits for this test" % count)
1908 repo = repo.unfiltered()
1913 repo = repo.unfiltered()
1909 nl = [repo.changelog.node(i) for i in _xrange(count)]
1914 nl = [repo.changelog.node(i) for i in _xrange(count)]
1910
1915
1911 def d():
1916 def d():
1912 for n in nl:
1917 for n in nl:
1913 repo.changelog.parents(n)
1918 repo.changelog.parents(n)
1914
1919
1915 timer(d)
1920 timer(d)
1916 fm.end()
1921 fm.end()
1917
1922
1918
1923
1919 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1924 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1920 def perfctxfiles(ui, repo, x, **opts):
1925 def perfctxfiles(ui, repo, x, **opts):
1921 opts = _byteskwargs(opts)
1926 opts = _byteskwargs(opts)
1922 x = int(x)
1927 x = int(x)
1923 timer, fm = gettimer(ui, opts)
1928 timer, fm = gettimer(ui, opts)
1924
1929
1925 def d():
1930 def d():
1926 len(repo[x].files())
1931 len(repo[x].files())
1927
1932
1928 timer(d)
1933 timer(d)
1929 fm.end()
1934 fm.end()
1930
1935
1931
1936
1932 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1937 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1933 def perfrawfiles(ui, repo, x, **opts):
1938 def perfrawfiles(ui, repo, x, **opts):
1934 opts = _byteskwargs(opts)
1939 opts = _byteskwargs(opts)
1935 x = int(x)
1940 x = int(x)
1936 timer, fm = gettimer(ui, opts)
1941 timer, fm = gettimer(ui, opts)
1937 cl = repo.changelog
1942 cl = repo.changelog
1938
1943
1939 def d():
1944 def d():
1940 len(cl.read(x)[3])
1945 len(cl.read(x)[3])
1941
1946
1942 timer(d)
1947 timer(d)
1943 fm.end()
1948 fm.end()
1944
1949
1945
1950
1946 @command(b'perf::lookup|perflookup', formatteropts)
1951 @command(b'perf::lookup|perflookup', formatteropts)
1947 def perflookup(ui, repo, rev, **opts):
1952 def perflookup(ui, repo, rev, **opts):
1948 opts = _byteskwargs(opts)
1953 opts = _byteskwargs(opts)
1949 timer, fm = gettimer(ui, opts)
1954 timer, fm = gettimer(ui, opts)
1950 timer(lambda: len(repo.lookup(rev)))
1955 timer(lambda: len(repo.lookup(rev)))
1951 fm.end()
1956 fm.end()
1952
1957
1953
1958
1954 @command(
1959 @command(
1955 b'perf::linelogedits|perflinelogedits',
1960 b'perf::linelogedits|perflinelogedits',
1956 [
1961 [
1957 (b'n', b'edits', 10000, b'number of edits'),
1962 (b'n', b'edits', 10000, b'number of edits'),
1958 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1963 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1959 ],
1964 ],
1960 norepo=True,
1965 norepo=True,
1961 )
1966 )
1962 def perflinelogedits(ui, **opts):
1967 def perflinelogedits(ui, **opts):
1963 from mercurial import linelog
1968 from mercurial import linelog
1964
1969
1965 opts = _byteskwargs(opts)
1970 opts = _byteskwargs(opts)
1966
1971
1967 edits = opts[b'edits']
1972 edits = opts[b'edits']
1968 maxhunklines = opts[b'max_hunk_lines']
1973 maxhunklines = opts[b'max_hunk_lines']
1969
1974
1970 maxb1 = 100000
1975 maxb1 = 100000
1971 random.seed(0)
1976 random.seed(0)
1972 randint = random.randint
1977 randint = random.randint
1973 currentlines = 0
1978 currentlines = 0
1974 arglist = []
1979 arglist = []
1975 for rev in _xrange(edits):
1980 for rev in _xrange(edits):
1976 a1 = randint(0, currentlines)
1981 a1 = randint(0, currentlines)
1977 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1982 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1978 b1 = randint(0, maxb1)
1983 b1 = randint(0, maxb1)
1979 b2 = randint(b1, b1 + maxhunklines)
1984 b2 = randint(b1, b1 + maxhunklines)
1980 currentlines += (b2 - b1) - (a2 - a1)
1985 currentlines += (b2 - b1) - (a2 - a1)
1981 arglist.append((rev, a1, a2, b1, b2))
1986 arglist.append((rev, a1, a2, b1, b2))
1982
1987
1983 def d():
1988 def d():
1984 ll = linelog.linelog()
1989 ll = linelog.linelog()
1985 for args in arglist:
1990 for args in arglist:
1986 ll.replacelines(*args)
1991 ll.replacelines(*args)
1987
1992
1988 timer, fm = gettimer(ui, opts)
1993 timer, fm = gettimer(ui, opts)
1989 timer(d)
1994 timer(d)
1990 fm.end()
1995 fm.end()
1991
1996
1992
1997
1993 @command(b'perf::revrange|perfrevrange', formatteropts)
1998 @command(b'perf::revrange|perfrevrange', formatteropts)
1994 def perfrevrange(ui, repo, *specs, **opts):
1999 def perfrevrange(ui, repo, *specs, **opts):
1995 opts = _byteskwargs(opts)
2000 opts = _byteskwargs(opts)
1996 timer, fm = gettimer(ui, opts)
2001 timer, fm = gettimer(ui, opts)
1997 revrange = scmutil.revrange
2002 revrange = scmutil.revrange
1998 timer(lambda: len(revrange(repo, specs)))
2003 timer(lambda: len(revrange(repo, specs)))
1999 fm.end()
2004 fm.end()
2000
2005
2001
2006
2002 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2007 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2003 def perfnodelookup(ui, repo, rev, **opts):
2008 def perfnodelookup(ui, repo, rev, **opts):
2004 opts = _byteskwargs(opts)
2009 opts = _byteskwargs(opts)
2005 timer, fm = gettimer(ui, opts)
2010 timer, fm = gettimer(ui, opts)
2006 import mercurial.revlog
2011 import mercurial.revlog
2007
2012
2008 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2013 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2009 n = scmutil.revsingle(repo, rev).node()
2014 n = scmutil.revsingle(repo, rev).node()
2010
2015
2011 try:
2016 try:
2012 cl = revlog(getsvfs(repo), radix=b"00changelog")
2017 cl = revlog(getsvfs(repo), radix=b"00changelog")
2013 except TypeError:
2018 except TypeError:
2014 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2019 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2015
2020
2016 def d():
2021 def d():
2017 cl.rev(n)
2022 cl.rev(n)
2018 clearcaches(cl)
2023 clearcaches(cl)
2019
2024
2020 timer(d)
2025 timer(d)
2021 fm.end()
2026 fm.end()
2022
2027
2023
2028
2024 @command(
2029 @command(
2025 b'perf::log|perflog',
2030 b'perf::log|perflog',
2026 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2031 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2027 )
2032 )
2028 def perflog(ui, repo, rev=None, **opts):
2033 def perflog(ui, repo, rev=None, **opts):
2029 opts = _byteskwargs(opts)
2034 opts = _byteskwargs(opts)
2030 if rev is None:
2035 if rev is None:
2031 rev = []
2036 rev = []
2032 timer, fm = gettimer(ui, opts)
2037 timer, fm = gettimer(ui, opts)
2033 ui.pushbuffer()
2038 ui.pushbuffer()
2034 timer(
2039 timer(
2035 lambda: commands.log(
2040 lambda: commands.log(
2036 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2041 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2037 )
2042 )
2038 )
2043 )
2039 ui.popbuffer()
2044 ui.popbuffer()
2040 fm.end()
2045 fm.end()
2041
2046
2042
2047
2043 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2048 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2044 def perfmoonwalk(ui, repo, **opts):
2049 def perfmoonwalk(ui, repo, **opts):
2045 """benchmark walking the changelog backwards
2050 """benchmark walking the changelog backwards
2046
2051
2047 This also loads the changelog data for each revision in the changelog.
2052 This also loads the changelog data for each revision in the changelog.
2048 """
2053 """
2049 opts = _byteskwargs(opts)
2054 opts = _byteskwargs(opts)
2050 timer, fm = gettimer(ui, opts)
2055 timer, fm = gettimer(ui, opts)
2051
2056
2052 def moonwalk():
2057 def moonwalk():
2053 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2058 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2054 ctx = repo[i]
2059 ctx = repo[i]
2055 ctx.branch() # read changelog data (in addition to the index)
2060 ctx.branch() # read changelog data (in addition to the index)
2056
2061
2057 timer(moonwalk)
2062 timer(moonwalk)
2058 fm.end()
2063 fm.end()
2059
2064
2060
2065
2061 @command(
2066 @command(
2062 b'perf::templating|perftemplating',
2067 b'perf::templating|perftemplating',
2063 [
2068 [
2064 (b'r', b'rev', [], b'revisions to run the template on'),
2069 (b'r', b'rev', [], b'revisions to run the template on'),
2065 ]
2070 ]
2066 + formatteropts,
2071 + formatteropts,
2067 )
2072 )
2068 def perftemplating(ui, repo, testedtemplate=None, **opts):
2073 def perftemplating(ui, repo, testedtemplate=None, **opts):
2069 """test the rendering time of a given template"""
2074 """test the rendering time of a given template"""
2070 if makelogtemplater is None:
2075 if makelogtemplater is None:
2071 raise error.Abort(
2076 raise error.Abort(
2072 b"perftemplating not available with this Mercurial",
2077 b"perftemplating not available with this Mercurial",
2073 hint=b"use 4.3 or later",
2078 hint=b"use 4.3 or later",
2074 )
2079 )
2075
2080
2076 opts = _byteskwargs(opts)
2081 opts = _byteskwargs(opts)
2077
2082
2078 nullui = ui.copy()
2083 nullui = ui.copy()
2079 nullui.fout = open(os.devnull, 'wb')
2084 nullui.fout = open(os.devnull, 'wb')
2080 nullui.disablepager()
2085 nullui.disablepager()
2081 revs = opts.get(b'rev')
2086 revs = opts.get(b'rev')
2082 if not revs:
2087 if not revs:
2083 revs = [b'all()']
2088 revs = [b'all()']
2084 revs = list(scmutil.revrange(repo, revs))
2089 revs = list(scmutil.revrange(repo, revs))
2085
2090
2086 defaulttemplate = (
2091 defaulttemplate = (
2087 b'{date|shortdate} [{rev}:{node|short}]'
2092 b'{date|shortdate} [{rev}:{node|short}]'
2088 b' {author|person}: {desc|firstline}\n'
2093 b' {author|person}: {desc|firstline}\n'
2089 )
2094 )
2090 if testedtemplate is None:
2095 if testedtemplate is None:
2091 testedtemplate = defaulttemplate
2096 testedtemplate = defaulttemplate
2092 displayer = makelogtemplater(nullui, repo, testedtemplate)
2097 displayer = makelogtemplater(nullui, repo, testedtemplate)
2093
2098
2094 def format():
2099 def format():
2095 for r in revs:
2100 for r in revs:
2096 ctx = repo[r]
2101 ctx = repo[r]
2097 displayer.show(ctx)
2102 displayer.show(ctx)
2098 displayer.flush(ctx)
2103 displayer.flush(ctx)
2099
2104
2100 timer, fm = gettimer(ui, opts)
2105 timer, fm = gettimer(ui, opts)
2101 timer(format)
2106 timer(format)
2102 fm.end()
2107 fm.end()
2103
2108
2104
2109
2105 def _displaystats(ui, opts, entries, data):
2110 def _displaystats(ui, opts, entries, data):
2106 # use a second formatter because the data are quite different, not sure
2111 # use a second formatter because the data are quite different, not sure
2107 # how it flies with the templater.
2112 # how it flies with the templater.
2108 fm = ui.formatter(b'perf-stats', opts)
2113 fm = ui.formatter(b'perf-stats', opts)
2109 for key, title in entries:
2114 for key, title in entries:
2110 values = data[key]
2115 values = data[key]
2111 nbvalues = len(data)
2116 nbvalues = len(data)
2112 values.sort()
2117 values.sort()
2113 stats = {
2118 stats = {
2114 'key': key,
2119 'key': key,
2115 'title': title,
2120 'title': title,
2116 'nbitems': len(values),
2121 'nbitems': len(values),
2117 'min': values[0][0],
2122 'min': values[0][0],
2118 '10%': values[(nbvalues * 10) // 100][0],
2123 '10%': values[(nbvalues * 10) // 100][0],
2119 '25%': values[(nbvalues * 25) // 100][0],
2124 '25%': values[(nbvalues * 25) // 100][0],
2120 '50%': values[(nbvalues * 50) // 100][0],
2125 '50%': values[(nbvalues * 50) // 100][0],
2121 '75%': values[(nbvalues * 75) // 100][0],
2126 '75%': values[(nbvalues * 75) // 100][0],
2122 '80%': values[(nbvalues * 80) // 100][0],
2127 '80%': values[(nbvalues * 80) // 100][0],
2123 '85%': values[(nbvalues * 85) // 100][0],
2128 '85%': values[(nbvalues * 85) // 100][0],
2124 '90%': values[(nbvalues * 90) // 100][0],
2129 '90%': values[(nbvalues * 90) // 100][0],
2125 '95%': values[(nbvalues * 95) // 100][0],
2130 '95%': values[(nbvalues * 95) // 100][0],
2126 '99%': values[(nbvalues * 99) // 100][0],
2131 '99%': values[(nbvalues * 99) // 100][0],
2127 'max': values[-1][0],
2132 'max': values[-1][0],
2128 }
2133 }
2129 fm.startitem()
2134 fm.startitem()
2130 fm.data(**stats)
2135 fm.data(**stats)
2131 # make node pretty for the human output
2136 # make node pretty for the human output
2132 fm.plain('### %s (%d items)\n' % (title, len(values)))
2137 fm.plain('### %s (%d items)\n' % (title, len(values)))
2133 lines = [
2138 lines = [
2134 'min',
2139 'min',
2135 '10%',
2140 '10%',
2136 '25%',
2141 '25%',
2137 '50%',
2142 '50%',
2138 '75%',
2143 '75%',
2139 '80%',
2144 '80%',
2140 '85%',
2145 '85%',
2141 '90%',
2146 '90%',
2142 '95%',
2147 '95%',
2143 '99%',
2148 '99%',
2144 'max',
2149 'max',
2145 ]
2150 ]
2146 for l in lines:
2151 for l in lines:
2147 fm.plain('%s: %s\n' % (l, stats[l]))
2152 fm.plain('%s: %s\n' % (l, stats[l]))
2148 fm.end()
2153 fm.end()
2149
2154
2150
2155
2151 @command(
2156 @command(
2152 b'perf::helper-mergecopies|perfhelper-mergecopies',
2157 b'perf::helper-mergecopies|perfhelper-mergecopies',
2153 formatteropts
2158 formatteropts
2154 + [
2159 + [
2155 (b'r', b'revs', [], b'restrict search to these revisions'),
2160 (b'r', b'revs', [], b'restrict search to these revisions'),
2156 (b'', b'timing', False, b'provides extra data (costly)'),
2161 (b'', b'timing', False, b'provides extra data (costly)'),
2157 (b'', b'stats', False, b'provides statistic about the measured data'),
2162 (b'', b'stats', False, b'provides statistic about the measured data'),
2158 ],
2163 ],
2159 )
2164 )
2160 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2165 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2161 """find statistics about potential parameters for `perfmergecopies`
2166 """find statistics about potential parameters for `perfmergecopies`
2162
2167
2163 This command find (base, p1, p2) triplet relevant for copytracing
2168 This command find (base, p1, p2) triplet relevant for copytracing
2164 benchmarking in the context of a merge. It reports values for some of the
2169 benchmarking in the context of a merge. It reports values for some of the
2165 parameters that impact merge copy tracing time during merge.
2170 parameters that impact merge copy tracing time during merge.
2166
2171
2167 If `--timing` is set, rename detection is run and the associated timing
2172 If `--timing` is set, rename detection is run and the associated timing
2168 will be reported. The extra details come at the cost of slower command
2173 will be reported. The extra details come at the cost of slower command
2169 execution.
2174 execution.
2170
2175
2171 Since rename detection is only run once, other factors might easily
2176 Since rename detection is only run once, other factors might easily
2172 affect the precision of the timing. However it should give a good
2177 affect the precision of the timing. However it should give a good
2173 approximation of which revision triplets are very costly.
2178 approximation of which revision triplets are very costly.
2174 """
2179 """
2175 opts = _byteskwargs(opts)
2180 opts = _byteskwargs(opts)
2176 fm = ui.formatter(b'perf', opts)
2181 fm = ui.formatter(b'perf', opts)
2177 dotiming = opts[b'timing']
2182 dotiming = opts[b'timing']
2178 dostats = opts[b'stats']
2183 dostats = opts[b'stats']
2179
2184
2180 output_template = [
2185 output_template = [
2181 ("base", "%(base)12s"),
2186 ("base", "%(base)12s"),
2182 ("p1", "%(p1.node)12s"),
2187 ("p1", "%(p1.node)12s"),
2183 ("p2", "%(p2.node)12s"),
2188 ("p2", "%(p2.node)12s"),
2184 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2189 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2185 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2190 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2186 ("p1.renames", "%(p1.renamedfiles)12d"),
2191 ("p1.renames", "%(p1.renamedfiles)12d"),
2187 ("p1.time", "%(p1.time)12.3f"),
2192 ("p1.time", "%(p1.time)12.3f"),
2188 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2193 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2189 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2194 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2190 ("p2.renames", "%(p2.renamedfiles)12d"),
2195 ("p2.renames", "%(p2.renamedfiles)12d"),
2191 ("p2.time", "%(p2.time)12.3f"),
2196 ("p2.time", "%(p2.time)12.3f"),
2192 ("renames", "%(nbrenamedfiles)12d"),
2197 ("renames", "%(nbrenamedfiles)12d"),
2193 ("total.time", "%(time)12.3f"),
2198 ("total.time", "%(time)12.3f"),
2194 ]
2199 ]
2195 if not dotiming:
2200 if not dotiming:
2196 output_template = [
2201 output_template = [
2197 i
2202 i
2198 for i in output_template
2203 for i in output_template
2199 if not ('time' in i[0] or 'renames' in i[0])
2204 if not ('time' in i[0] or 'renames' in i[0])
2200 ]
2205 ]
2201 header_names = [h for (h, v) in output_template]
2206 header_names = [h for (h, v) in output_template]
2202 output = ' '.join([v for (h, v) in output_template]) + '\n'
2207 output = ' '.join([v for (h, v) in output_template]) + '\n'
2203 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2208 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2204 fm.plain(header % tuple(header_names))
2209 fm.plain(header % tuple(header_names))
2205
2210
2206 if not revs:
2211 if not revs:
2207 revs = ['all()']
2212 revs = ['all()']
2208 revs = scmutil.revrange(repo, revs)
2213 revs = scmutil.revrange(repo, revs)
2209
2214
2210 if dostats:
2215 if dostats:
2211 alldata = {
2216 alldata = {
2212 'nbrevs': [],
2217 'nbrevs': [],
2213 'nbmissingfiles': [],
2218 'nbmissingfiles': [],
2214 }
2219 }
2215 if dotiming:
2220 if dotiming:
2216 alldata['parentnbrenames'] = []
2221 alldata['parentnbrenames'] = []
2217 alldata['totalnbrenames'] = []
2222 alldata['totalnbrenames'] = []
2218 alldata['parenttime'] = []
2223 alldata['parenttime'] = []
2219 alldata['totaltime'] = []
2224 alldata['totaltime'] = []
2220
2225
2221 roi = repo.revs('merge() and %ld', revs)
2226 roi = repo.revs('merge() and %ld', revs)
2222 for r in roi:
2227 for r in roi:
2223 ctx = repo[r]
2228 ctx = repo[r]
2224 p1 = ctx.p1()
2229 p1 = ctx.p1()
2225 p2 = ctx.p2()
2230 p2 = ctx.p2()
2226 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2231 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2227 for b in bases:
2232 for b in bases:
2228 b = repo[b]
2233 b = repo[b]
2229 p1missing = copies._computeforwardmissing(b, p1)
2234 p1missing = copies._computeforwardmissing(b, p1)
2230 p2missing = copies._computeforwardmissing(b, p2)
2235 p2missing = copies._computeforwardmissing(b, p2)
2231 data = {
2236 data = {
2232 b'base': b.hex(),
2237 b'base': b.hex(),
2233 b'p1.node': p1.hex(),
2238 b'p1.node': p1.hex(),
2234 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2239 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2235 b'p1.nbmissingfiles': len(p1missing),
2240 b'p1.nbmissingfiles': len(p1missing),
2236 b'p2.node': p2.hex(),
2241 b'p2.node': p2.hex(),
2237 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2242 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2238 b'p2.nbmissingfiles': len(p2missing),
2243 b'p2.nbmissingfiles': len(p2missing),
2239 }
2244 }
2240 if dostats:
2245 if dostats:
2241 if p1missing:
2246 if p1missing:
2242 alldata['nbrevs'].append(
2247 alldata['nbrevs'].append(
2243 (data['p1.nbrevs'], b.hex(), p1.hex())
2248 (data['p1.nbrevs'], b.hex(), p1.hex())
2244 )
2249 )
2245 alldata['nbmissingfiles'].append(
2250 alldata['nbmissingfiles'].append(
2246 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2251 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2247 )
2252 )
2248 if p2missing:
2253 if p2missing:
2249 alldata['nbrevs'].append(
2254 alldata['nbrevs'].append(
2250 (data['p2.nbrevs'], b.hex(), p2.hex())
2255 (data['p2.nbrevs'], b.hex(), p2.hex())
2251 )
2256 )
2252 alldata['nbmissingfiles'].append(
2257 alldata['nbmissingfiles'].append(
2253 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2258 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2254 )
2259 )
2255 if dotiming:
2260 if dotiming:
2256 begin = util.timer()
2261 begin = util.timer()
2257 mergedata = copies.mergecopies(repo, p1, p2, b)
2262 mergedata = copies.mergecopies(repo, p1, p2, b)
2258 end = util.timer()
2263 end = util.timer()
2259 # not very stable timing since we did only one run
2264 # not very stable timing since we did only one run
2260 data['time'] = end - begin
2265 data['time'] = end - begin
2261 # mergedata contains five dicts: "copy", "movewithdir",
2266 # mergedata contains five dicts: "copy", "movewithdir",
2262 # "diverge", "renamedelete" and "dirmove".
2267 # "diverge", "renamedelete" and "dirmove".
2263 # The first 4 are about renamed file so lets count that.
2268 # The first 4 are about renamed file so lets count that.
2264 renames = len(mergedata[0])
2269 renames = len(mergedata[0])
2265 renames += len(mergedata[1])
2270 renames += len(mergedata[1])
2266 renames += len(mergedata[2])
2271 renames += len(mergedata[2])
2267 renames += len(mergedata[3])
2272 renames += len(mergedata[3])
2268 data['nbrenamedfiles'] = renames
2273 data['nbrenamedfiles'] = renames
2269 begin = util.timer()
2274 begin = util.timer()
2270 p1renames = copies.pathcopies(b, p1)
2275 p1renames = copies.pathcopies(b, p1)
2271 end = util.timer()
2276 end = util.timer()
2272 data['p1.time'] = end - begin
2277 data['p1.time'] = end - begin
2273 begin = util.timer()
2278 begin = util.timer()
2274 p2renames = copies.pathcopies(b, p2)
2279 p2renames = copies.pathcopies(b, p2)
2275 end = util.timer()
2280 end = util.timer()
2276 data['p2.time'] = end - begin
2281 data['p2.time'] = end - begin
2277 data['p1.renamedfiles'] = len(p1renames)
2282 data['p1.renamedfiles'] = len(p1renames)
2278 data['p2.renamedfiles'] = len(p2renames)
2283 data['p2.renamedfiles'] = len(p2renames)
2279
2284
2280 if dostats:
2285 if dostats:
2281 if p1missing:
2286 if p1missing:
2282 alldata['parentnbrenames'].append(
2287 alldata['parentnbrenames'].append(
2283 (data['p1.renamedfiles'], b.hex(), p1.hex())
2288 (data['p1.renamedfiles'], b.hex(), p1.hex())
2284 )
2289 )
2285 alldata['parenttime'].append(
2290 alldata['parenttime'].append(
2286 (data['p1.time'], b.hex(), p1.hex())
2291 (data['p1.time'], b.hex(), p1.hex())
2287 )
2292 )
2288 if p2missing:
2293 if p2missing:
2289 alldata['parentnbrenames'].append(
2294 alldata['parentnbrenames'].append(
2290 (data['p2.renamedfiles'], b.hex(), p2.hex())
2295 (data['p2.renamedfiles'], b.hex(), p2.hex())
2291 )
2296 )
2292 alldata['parenttime'].append(
2297 alldata['parenttime'].append(
2293 (data['p2.time'], b.hex(), p2.hex())
2298 (data['p2.time'], b.hex(), p2.hex())
2294 )
2299 )
2295 if p1missing or p2missing:
2300 if p1missing or p2missing:
2296 alldata['totalnbrenames'].append(
2301 alldata['totalnbrenames'].append(
2297 (
2302 (
2298 data['nbrenamedfiles'],
2303 data['nbrenamedfiles'],
2299 b.hex(),
2304 b.hex(),
2300 p1.hex(),
2305 p1.hex(),
2301 p2.hex(),
2306 p2.hex(),
2302 )
2307 )
2303 )
2308 )
2304 alldata['totaltime'].append(
2309 alldata['totaltime'].append(
2305 (data['time'], b.hex(), p1.hex(), p2.hex())
2310 (data['time'], b.hex(), p1.hex(), p2.hex())
2306 )
2311 )
2307 fm.startitem()
2312 fm.startitem()
2308 fm.data(**data)
2313 fm.data(**data)
2309 # make node pretty for the human output
2314 # make node pretty for the human output
2310 out = data.copy()
2315 out = data.copy()
2311 out['base'] = fm.hexfunc(b.node())
2316 out['base'] = fm.hexfunc(b.node())
2312 out['p1.node'] = fm.hexfunc(p1.node())
2317 out['p1.node'] = fm.hexfunc(p1.node())
2313 out['p2.node'] = fm.hexfunc(p2.node())
2318 out['p2.node'] = fm.hexfunc(p2.node())
2314 fm.plain(output % out)
2319 fm.plain(output % out)
2315
2320
2316 fm.end()
2321 fm.end()
2317 if dostats:
2322 if dostats:
2318 # use a second formatter because the data are quite different, not sure
2323 # use a second formatter because the data are quite different, not sure
2319 # how it flies with the templater.
2324 # how it flies with the templater.
2320 entries = [
2325 entries = [
2321 ('nbrevs', 'number of revision covered'),
2326 ('nbrevs', 'number of revision covered'),
2322 ('nbmissingfiles', 'number of missing files at head'),
2327 ('nbmissingfiles', 'number of missing files at head'),
2323 ]
2328 ]
2324 if dotiming:
2329 if dotiming:
2325 entries.append(
2330 entries.append(
2326 ('parentnbrenames', 'rename from one parent to base')
2331 ('parentnbrenames', 'rename from one parent to base')
2327 )
2332 )
2328 entries.append(('totalnbrenames', 'total number of renames'))
2333 entries.append(('totalnbrenames', 'total number of renames'))
2329 entries.append(('parenttime', 'time for one parent'))
2334 entries.append(('parenttime', 'time for one parent'))
2330 entries.append(('totaltime', 'time for both parents'))
2335 entries.append(('totaltime', 'time for both parents'))
2331 _displaystats(ui, opts, entries, alldata)
2336 _displaystats(ui, opts, entries, alldata)
2332
2337
2333
2338
2334 @command(
2339 @command(
2335 b'perf::helper-pathcopies|perfhelper-pathcopies',
2340 b'perf::helper-pathcopies|perfhelper-pathcopies',
2336 formatteropts
2341 formatteropts
2337 + [
2342 + [
2338 (b'r', b'revs', [], b'restrict search to these revisions'),
2343 (b'r', b'revs', [], b'restrict search to these revisions'),
2339 (b'', b'timing', False, b'provides extra data (costly)'),
2344 (b'', b'timing', False, b'provides extra data (costly)'),
2340 (b'', b'stats', False, b'provides statistic about the measured data'),
2345 (b'', b'stats', False, b'provides statistic about the measured data'),
2341 ],
2346 ],
2342 )
2347 )
2343 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2348 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2344 """find statistic about potential parameters for the `perftracecopies`
2349 """find statistic about potential parameters for the `perftracecopies`
2345
2350
2346 This command find source-destination pair relevant for copytracing testing.
2351 This command find source-destination pair relevant for copytracing testing.
2347 It report value for some of the parameters that impact copy tracing time.
2352 It report value for some of the parameters that impact copy tracing time.
2348
2353
2349 If `--timing` is set, rename detection is run and the associated timing
2354 If `--timing` is set, rename detection is run and the associated timing
2350 will be reported. The extra details comes at the cost of a slower command
2355 will be reported. The extra details comes at the cost of a slower command
2351 execution.
2356 execution.
2352
2357
2353 Since the rename detection is only run once, other factors might easily
2358 Since the rename detection is only run once, other factors might easily
2354 affect the precision of the timing. However it should give a good
2359 affect the precision of the timing. However it should give a good
2355 approximation of which revision pairs are very costly.
2360 approximation of which revision pairs are very costly.
2356 """
2361 """
2357 opts = _byteskwargs(opts)
2362 opts = _byteskwargs(opts)
2358 fm = ui.formatter(b'perf', opts)
2363 fm = ui.formatter(b'perf', opts)
2359 dotiming = opts[b'timing']
2364 dotiming = opts[b'timing']
2360 dostats = opts[b'stats']
2365 dostats = opts[b'stats']
2361
2366
2362 if dotiming:
2367 if dotiming:
2363 header = '%12s %12s %12s %12s %12s %12s\n'
2368 header = '%12s %12s %12s %12s %12s %12s\n'
2364 output = (
2369 output = (
2365 "%(source)12s %(destination)12s "
2370 "%(source)12s %(destination)12s "
2366 "%(nbrevs)12d %(nbmissingfiles)12d "
2371 "%(nbrevs)12d %(nbmissingfiles)12d "
2367 "%(nbrenamedfiles)12d %(time)18.5f\n"
2372 "%(nbrenamedfiles)12d %(time)18.5f\n"
2368 )
2373 )
2369 header_names = (
2374 header_names = (
2370 "source",
2375 "source",
2371 "destination",
2376 "destination",
2372 "nb-revs",
2377 "nb-revs",
2373 "nb-files",
2378 "nb-files",
2374 "nb-renames",
2379 "nb-renames",
2375 "time",
2380 "time",
2376 )
2381 )
2377 fm.plain(header % header_names)
2382 fm.plain(header % header_names)
2378 else:
2383 else:
2379 header = '%12s %12s %12s %12s\n'
2384 header = '%12s %12s %12s %12s\n'
2380 output = (
2385 output = (
2381 "%(source)12s %(destination)12s "
2386 "%(source)12s %(destination)12s "
2382 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2387 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2383 )
2388 )
2384 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2389 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2385
2390
2386 if not revs:
2391 if not revs:
2387 revs = ['all()']
2392 revs = ['all()']
2388 revs = scmutil.revrange(repo, revs)
2393 revs = scmutil.revrange(repo, revs)
2389
2394
2390 if dostats:
2395 if dostats:
2391 alldata = {
2396 alldata = {
2392 'nbrevs': [],
2397 'nbrevs': [],
2393 'nbmissingfiles': [],
2398 'nbmissingfiles': [],
2394 }
2399 }
2395 if dotiming:
2400 if dotiming:
2396 alldata['nbrenames'] = []
2401 alldata['nbrenames'] = []
2397 alldata['time'] = []
2402 alldata['time'] = []
2398
2403
2399 roi = repo.revs('merge() and %ld', revs)
2404 roi = repo.revs('merge() and %ld', revs)
2400 for r in roi:
2405 for r in roi:
2401 ctx = repo[r]
2406 ctx = repo[r]
2402 p1 = ctx.p1().rev()
2407 p1 = ctx.p1().rev()
2403 p2 = ctx.p2().rev()
2408 p2 = ctx.p2().rev()
2404 bases = repo.changelog._commonancestorsheads(p1, p2)
2409 bases = repo.changelog._commonancestorsheads(p1, p2)
2405 for p in (p1, p2):
2410 for p in (p1, p2):
2406 for b in bases:
2411 for b in bases:
2407 base = repo[b]
2412 base = repo[b]
2408 parent = repo[p]
2413 parent = repo[p]
2409 missing = copies._computeforwardmissing(base, parent)
2414 missing = copies._computeforwardmissing(base, parent)
2410 if not missing:
2415 if not missing:
2411 continue
2416 continue
2412 data = {
2417 data = {
2413 b'source': base.hex(),
2418 b'source': base.hex(),
2414 b'destination': parent.hex(),
2419 b'destination': parent.hex(),
2415 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2420 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2416 b'nbmissingfiles': len(missing),
2421 b'nbmissingfiles': len(missing),
2417 }
2422 }
2418 if dostats:
2423 if dostats:
2419 alldata['nbrevs'].append(
2424 alldata['nbrevs'].append(
2420 (
2425 (
2421 data['nbrevs'],
2426 data['nbrevs'],
2422 base.hex(),
2427 base.hex(),
2423 parent.hex(),
2428 parent.hex(),
2424 )
2429 )
2425 )
2430 )
2426 alldata['nbmissingfiles'].append(
2431 alldata['nbmissingfiles'].append(
2427 (
2432 (
2428 data['nbmissingfiles'],
2433 data['nbmissingfiles'],
2429 base.hex(),
2434 base.hex(),
2430 parent.hex(),
2435 parent.hex(),
2431 )
2436 )
2432 )
2437 )
2433 if dotiming:
2438 if dotiming:
2434 begin = util.timer()
2439 begin = util.timer()
2435 renames = copies.pathcopies(base, parent)
2440 renames = copies.pathcopies(base, parent)
2436 end = util.timer()
2441 end = util.timer()
2437 # not very stable timing since we did only one run
2442 # not very stable timing since we did only one run
2438 data['time'] = end - begin
2443 data['time'] = end - begin
2439 data['nbrenamedfiles'] = len(renames)
2444 data['nbrenamedfiles'] = len(renames)
2440 if dostats:
2445 if dostats:
2441 alldata['time'].append(
2446 alldata['time'].append(
2442 (
2447 (
2443 data['time'],
2448 data['time'],
2444 base.hex(),
2449 base.hex(),
2445 parent.hex(),
2450 parent.hex(),
2446 )
2451 )
2447 )
2452 )
2448 alldata['nbrenames'].append(
2453 alldata['nbrenames'].append(
2449 (
2454 (
2450 data['nbrenamedfiles'],
2455 data['nbrenamedfiles'],
2451 base.hex(),
2456 base.hex(),
2452 parent.hex(),
2457 parent.hex(),
2453 )
2458 )
2454 )
2459 )
2455 fm.startitem()
2460 fm.startitem()
2456 fm.data(**data)
2461 fm.data(**data)
2457 out = data.copy()
2462 out = data.copy()
2458 out['source'] = fm.hexfunc(base.node())
2463 out['source'] = fm.hexfunc(base.node())
2459 out['destination'] = fm.hexfunc(parent.node())
2464 out['destination'] = fm.hexfunc(parent.node())
2460 fm.plain(output % out)
2465 fm.plain(output % out)
2461
2466
2462 fm.end()
2467 fm.end()
2463 if dostats:
2468 if dostats:
2464 entries = [
2469 entries = [
2465 ('nbrevs', 'number of revision covered'),
2470 ('nbrevs', 'number of revision covered'),
2466 ('nbmissingfiles', 'number of missing files at head'),
2471 ('nbmissingfiles', 'number of missing files at head'),
2467 ]
2472 ]
2468 if dotiming:
2473 if dotiming:
2469 entries.append(('nbrenames', 'renamed files'))
2474 entries.append(('nbrenames', 'renamed files'))
2470 entries.append(('time', 'time'))
2475 entries.append(('time', 'time'))
2471 _displaystats(ui, opts, entries, alldata)
2476 _displaystats(ui, opts, entries, alldata)
2472
2477
2473
2478
2474 @command(b'perf::cca|perfcca', formatteropts)
2479 @command(b'perf::cca|perfcca', formatteropts)
2475 def perfcca(ui, repo, **opts):
2480 def perfcca(ui, repo, **opts):
2476 opts = _byteskwargs(opts)
2481 opts = _byteskwargs(opts)
2477 timer, fm = gettimer(ui, opts)
2482 timer, fm = gettimer(ui, opts)
2478 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2483 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2479 fm.end()
2484 fm.end()
2480
2485
2481
2486
2482 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2487 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2483 def perffncacheload(ui, repo, **opts):
2488 def perffncacheload(ui, repo, **opts):
2484 opts = _byteskwargs(opts)
2489 opts = _byteskwargs(opts)
2485 timer, fm = gettimer(ui, opts)
2490 timer, fm = gettimer(ui, opts)
2486 s = repo.store
2491 s = repo.store
2487
2492
2488 def d():
2493 def d():
2489 s.fncache._load()
2494 s.fncache._load()
2490
2495
2491 timer(d)
2496 timer(d)
2492 fm.end()
2497 fm.end()
2493
2498
2494
2499
2495 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2500 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2496 def perffncachewrite(ui, repo, **opts):
2501 def perffncachewrite(ui, repo, **opts):
2497 opts = _byteskwargs(opts)
2502 opts = _byteskwargs(opts)
2498 timer, fm = gettimer(ui, opts)
2503 timer, fm = gettimer(ui, opts)
2499 s = repo.store
2504 s = repo.store
2500 lock = repo.lock()
2505 lock = repo.lock()
2501 s.fncache._load()
2506 s.fncache._load()
2502 tr = repo.transaction(b'perffncachewrite')
2507 tr = repo.transaction(b'perffncachewrite')
2503 tr.addbackup(b'fncache')
2508 tr.addbackup(b'fncache')
2504
2509
2505 def d():
2510 def d():
2506 s.fncache._dirty = True
2511 s.fncache._dirty = True
2507 s.fncache.write(tr)
2512 s.fncache.write(tr)
2508
2513
2509 timer(d)
2514 timer(d)
2510 tr.close()
2515 tr.close()
2511 lock.release()
2516 lock.release()
2512 fm.end()
2517 fm.end()
2513
2518
2514
2519
2515 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2520 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2516 def perffncacheencode(ui, repo, **opts):
2521 def perffncacheencode(ui, repo, **opts):
2517 opts = _byteskwargs(opts)
2522 opts = _byteskwargs(opts)
2518 timer, fm = gettimer(ui, opts)
2523 timer, fm = gettimer(ui, opts)
2519 s = repo.store
2524 s = repo.store
2520 s.fncache._load()
2525 s.fncache._load()
2521
2526
2522 def d():
2527 def d():
2523 for p in s.fncache.entries:
2528 for p in s.fncache.entries:
2524 s.encode(p)
2529 s.encode(p)
2525
2530
2526 timer(d)
2531 timer(d)
2527 fm.end()
2532 fm.end()
2528
2533
2529
2534
2530 def _bdiffworker(q, blocks, xdiff, ready, done):
2535 def _bdiffworker(q, blocks, xdiff, ready, done):
2531 while not done.is_set():
2536 while not done.is_set():
2532 pair = q.get()
2537 pair = q.get()
2533 while pair is not None:
2538 while pair is not None:
2534 if xdiff:
2539 if xdiff:
2535 mdiff.bdiff.xdiffblocks(*pair)
2540 mdiff.bdiff.xdiffblocks(*pair)
2536 elif blocks:
2541 elif blocks:
2537 mdiff.bdiff.blocks(*pair)
2542 mdiff.bdiff.blocks(*pair)
2538 else:
2543 else:
2539 mdiff.textdiff(*pair)
2544 mdiff.textdiff(*pair)
2540 q.task_done()
2545 q.task_done()
2541 pair = q.get()
2546 pair = q.get()
2542 q.task_done() # for the None one
2547 q.task_done() # for the None one
2543 with ready:
2548 with ready:
2544 ready.wait()
2549 ready.wait()
2545
2550
2546
2551
2547 def _manifestrevision(repo, mnode):
2552 def _manifestrevision(repo, mnode):
2548 ml = repo.manifestlog
2553 ml = repo.manifestlog
2549
2554
2550 if util.safehasattr(ml, b'getstorage'):
2555 if util.safehasattr(ml, b'getstorage'):
2551 store = ml.getstorage(b'')
2556 store = ml.getstorage(b'')
2552 else:
2557 else:
2553 store = ml._revlog
2558 store = ml._revlog
2554
2559
2555 return store.revision(mnode)
2560 return store.revision(mnode)
2556
2561
2557
2562
2558 @command(
2563 @command(
2559 b'perf::bdiff|perfbdiff',
2564 b'perf::bdiff|perfbdiff',
2560 revlogopts
2565 revlogopts
2561 + formatteropts
2566 + formatteropts
2562 + [
2567 + [
2563 (
2568 (
2564 b'',
2569 b'',
2565 b'count',
2570 b'count',
2566 1,
2571 1,
2567 b'number of revisions to test (when using --startrev)',
2572 b'number of revisions to test (when using --startrev)',
2568 ),
2573 ),
2569 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2574 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2570 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2575 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2571 (b'', b'blocks', False, b'test computing diffs into blocks'),
2576 (b'', b'blocks', False, b'test computing diffs into blocks'),
2572 (b'', b'xdiff', False, b'use xdiff algorithm'),
2577 (b'', b'xdiff', False, b'use xdiff algorithm'),
2573 ],
2578 ],
2574 b'-c|-m|FILE REV',
2579 b'-c|-m|FILE REV',
2575 )
2580 )
2576 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2581 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2577 """benchmark a bdiff between revisions
2582 """benchmark a bdiff between revisions
2578
2583
2579 By default, benchmark a bdiff between its delta parent and itself.
2584 By default, benchmark a bdiff between its delta parent and itself.
2580
2585
2581 With ``--count``, benchmark bdiffs between delta parents and self for N
2586 With ``--count``, benchmark bdiffs between delta parents and self for N
2582 revisions starting at the specified revision.
2587 revisions starting at the specified revision.
2583
2588
2584 With ``--alldata``, assume the requested revision is a changeset and
2589 With ``--alldata``, assume the requested revision is a changeset and
2585 measure bdiffs for all changes related to that changeset (manifest
2590 measure bdiffs for all changes related to that changeset (manifest
2586 and filelogs).
2591 and filelogs).
2587 """
2592 """
2588 opts = _byteskwargs(opts)
2593 opts = _byteskwargs(opts)
2589
2594
2590 if opts[b'xdiff'] and not opts[b'blocks']:
2595 if opts[b'xdiff'] and not opts[b'blocks']:
2591 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2596 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2592
2597
2593 if opts[b'alldata']:
2598 if opts[b'alldata']:
2594 opts[b'changelog'] = True
2599 opts[b'changelog'] = True
2595
2600
2596 if opts.get(b'changelog') or opts.get(b'manifest'):
2601 if opts.get(b'changelog') or opts.get(b'manifest'):
2597 file_, rev = None, file_
2602 file_, rev = None, file_
2598 elif rev is None:
2603 elif rev is None:
2599 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2604 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2600
2605
2601 blocks = opts[b'blocks']
2606 blocks = opts[b'blocks']
2602 xdiff = opts[b'xdiff']
2607 xdiff = opts[b'xdiff']
2603 textpairs = []
2608 textpairs = []
2604
2609
2605 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2610 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2606
2611
2607 startrev = r.rev(r.lookup(rev))
2612 startrev = r.rev(r.lookup(rev))
2608 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2613 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2609 if opts[b'alldata']:
2614 if opts[b'alldata']:
2610 # Load revisions associated with changeset.
2615 # Load revisions associated with changeset.
2611 ctx = repo[rev]
2616 ctx = repo[rev]
2612 mtext = _manifestrevision(repo, ctx.manifestnode())
2617 mtext = _manifestrevision(repo, ctx.manifestnode())
2613 for pctx in ctx.parents():
2618 for pctx in ctx.parents():
2614 pman = _manifestrevision(repo, pctx.manifestnode())
2619 pman = _manifestrevision(repo, pctx.manifestnode())
2615 textpairs.append((pman, mtext))
2620 textpairs.append((pman, mtext))
2616
2621
2617 # Load filelog revisions by iterating manifest delta.
2622 # Load filelog revisions by iterating manifest delta.
2618 man = ctx.manifest()
2623 man = ctx.manifest()
2619 pman = ctx.p1().manifest()
2624 pman = ctx.p1().manifest()
2620 for filename, change in pman.diff(man).items():
2625 for filename, change in pman.diff(man).items():
2621 fctx = repo.file(filename)
2626 fctx = repo.file(filename)
2622 f1 = fctx.revision(change[0][0] or -1)
2627 f1 = fctx.revision(change[0][0] or -1)
2623 f2 = fctx.revision(change[1][0] or -1)
2628 f2 = fctx.revision(change[1][0] or -1)
2624 textpairs.append((f1, f2))
2629 textpairs.append((f1, f2))
2625 else:
2630 else:
2626 dp = r.deltaparent(rev)
2631 dp = r.deltaparent(rev)
2627 textpairs.append((r.revision(dp), r.revision(rev)))
2632 textpairs.append((r.revision(dp), r.revision(rev)))
2628
2633
2629 withthreads = threads > 0
2634 withthreads = threads > 0
2630 if not withthreads:
2635 if not withthreads:
2631
2636
2632 def d():
2637 def d():
2633 for pair in textpairs:
2638 for pair in textpairs:
2634 if xdiff:
2639 if xdiff:
2635 mdiff.bdiff.xdiffblocks(*pair)
2640 mdiff.bdiff.xdiffblocks(*pair)
2636 elif blocks:
2641 elif blocks:
2637 mdiff.bdiff.blocks(*pair)
2642 mdiff.bdiff.blocks(*pair)
2638 else:
2643 else:
2639 mdiff.textdiff(*pair)
2644 mdiff.textdiff(*pair)
2640
2645
2641 else:
2646 else:
2642 q = queue()
2647 q = queue()
2643 for i in _xrange(threads):
2648 for i in _xrange(threads):
2644 q.put(None)
2649 q.put(None)
2645 ready = threading.Condition()
2650 ready = threading.Condition()
2646 done = threading.Event()
2651 done = threading.Event()
2647 for i in _xrange(threads):
2652 for i in _xrange(threads):
2648 threading.Thread(
2653 threading.Thread(
2649 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2654 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2650 ).start()
2655 ).start()
2651 q.join()
2656 q.join()
2652
2657
2653 def d():
2658 def d():
2654 for pair in textpairs:
2659 for pair in textpairs:
2655 q.put(pair)
2660 q.put(pair)
2656 for i in _xrange(threads):
2661 for i in _xrange(threads):
2657 q.put(None)
2662 q.put(None)
2658 with ready:
2663 with ready:
2659 ready.notify_all()
2664 ready.notify_all()
2660 q.join()
2665 q.join()
2661
2666
2662 timer, fm = gettimer(ui, opts)
2667 timer, fm = gettimer(ui, opts)
2663 timer(d)
2668 timer(d)
2664 fm.end()
2669 fm.end()
2665
2670
2666 if withthreads:
2671 if withthreads:
2667 done.set()
2672 done.set()
2668 for i in _xrange(threads):
2673 for i in _xrange(threads):
2669 q.put(None)
2674 q.put(None)
2670 with ready:
2675 with ready:
2671 ready.notify_all()
2676 ready.notify_all()
2672
2677
2673
2678
2674 @command(
2679 @command(
2675 b'perf::unbundle',
2680 b'perf::unbundle',
2676 formatteropts,
2681 formatteropts,
2677 b'BUNDLE_FILE',
2682 b'BUNDLE_FILE',
2678 )
2683 )
2679 def perf_unbundle(ui, repo, fname, **opts):
2684 def perf_unbundle(ui, repo, fname, **opts):
2680 """benchmark application of a bundle in a repository.
2685 """benchmark application of a bundle in a repository.
2681
2686
2682 This does not include the final transaction processing"""
2687 This does not include the final transaction processing"""
2683
2688
2684 from mercurial import exchange
2689 from mercurial import exchange
2685 from mercurial import bundle2
2690 from mercurial import bundle2
2686 from mercurial import transaction
2691 from mercurial import transaction
2687
2692
2688 opts = _byteskwargs(opts)
2693 opts = _byteskwargs(opts)
2689
2694
2690 ### some compatibility hotfix
2695 ### some compatibility hotfix
2691 #
2696 #
2692 # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
2697 # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
2693 # critical regression that break transaction rollback for files that are
2698 # critical regression that break transaction rollback for files that are
2694 # de-inlined.
2699 # de-inlined.
2695 method = transaction.transaction._addentry
2700 method = transaction.transaction._addentry
2696 pre_63edc384d3b7 = "data" in getargspec(method).args
2701 pre_63edc384d3b7 = "data" in getargspec(method).args
2697 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
2702 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
2698 # a changeset that is a close descendant of 18415fc918a1, the changeset
2703 # a changeset that is a close descendant of 18415fc918a1, the changeset
2699 # that conclude the fix run for the bug introduced in 63edc384d3b7.
2704 # that conclude the fix run for the bug introduced in 63edc384d3b7.
2700 args = getargspec(error.Abort.__init__).args
2705 args = getargspec(error.Abort.__init__).args
2701 post_18415fc918a1 = "detailed_exit_code" in args
2706 post_18415fc918a1 = "detailed_exit_code" in args
2702
2707
2703 old_max_inline = None
2708 old_max_inline = None
2704 try:
2709 try:
2705 if not (pre_63edc384d3b7 or post_18415fc918a1):
2710 if not (pre_63edc384d3b7 or post_18415fc918a1):
2706 # disable inlining
2711 # disable inlining
2707 old_max_inline = mercurial.revlog._maxinline
2712 old_max_inline = mercurial.revlog._maxinline
2708 # large enough to never happen
2713 # large enough to never happen
2709 mercurial.revlog._maxinline = 2 ** 50
2714 mercurial.revlog._maxinline = 2 ** 50
2710
2715
2711 with repo.lock():
2716 with repo.lock():
2712 bundle = [None, None]
2717 bundle = [None, None]
2713 orig_quiet = repo.ui.quiet
2718 orig_quiet = repo.ui.quiet
2714 try:
2719 try:
2715 repo.ui.quiet = True
2720 repo.ui.quiet = True
2716 with open(fname, mode="rb") as f:
2721 with open(fname, mode="rb") as f:
2717
2722
2718 def noop_report(*args, **kwargs):
2723 def noop_report(*args, **kwargs):
2719 pass
2724 pass
2720
2725
2721 def setup():
2726 def setup():
2722 gen, tr = bundle
2727 gen, tr = bundle
2723 if tr is not None:
2728 if tr is not None:
2724 tr.abort()
2729 tr.abort()
2725 bundle[:] = [None, None]
2730 bundle[:] = [None, None]
2726 f.seek(0)
2731 f.seek(0)
2727 bundle[0] = exchange.readbundle(ui, f, fname)
2732 bundle[0] = exchange.readbundle(ui, f, fname)
2728 bundle[1] = repo.transaction(b'perf::unbundle')
2733 bundle[1] = repo.transaction(b'perf::unbundle')
2729 # silence the transaction
2734 # silence the transaction
2730 bundle[1]._report = noop_report
2735 bundle[1]._report = noop_report
2731
2736
2732 def apply():
2737 def apply():
2733 gen, tr = bundle
2738 gen, tr = bundle
2734 bundle2.applybundle(
2739 bundle2.applybundle(
2735 repo,
2740 repo,
2736 gen,
2741 gen,
2737 tr,
2742 tr,
2738 source=b'perf::unbundle',
2743 source=b'perf::unbundle',
2739 url=fname,
2744 url=fname,
2740 )
2745 )
2741
2746
2742 timer, fm = gettimer(ui, opts)
2747 timer, fm = gettimer(ui, opts)
2743 timer(apply, setup=setup)
2748 timer(apply, setup=setup)
2744 fm.end()
2749 fm.end()
2745 finally:
2750 finally:
2746 repo.ui.quiet == orig_quiet
2751 repo.ui.quiet == orig_quiet
2747 gen, tr = bundle
2752 gen, tr = bundle
2748 if tr is not None:
2753 if tr is not None:
2749 tr.abort()
2754 tr.abort()
2750 finally:
2755 finally:
2751 if old_max_inline is not None:
2756 if old_max_inline is not None:
2752 mercurial.revlog._maxinline = old_max_inline
2757 mercurial.revlog._maxinline = old_max_inline
2753
2758
2754
2759
2755 @command(
2760 @command(
2756 b'perf::unidiff|perfunidiff',
2761 b'perf::unidiff|perfunidiff',
2757 revlogopts
2762 revlogopts
2758 + formatteropts
2763 + formatteropts
2759 + [
2764 + [
2760 (
2765 (
2761 b'',
2766 b'',
2762 b'count',
2767 b'count',
2763 1,
2768 1,
2764 b'number of revisions to test (when using --startrev)',
2769 b'number of revisions to test (when using --startrev)',
2765 ),
2770 ),
2766 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2771 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2767 ],
2772 ],
2768 b'-c|-m|FILE REV',
2773 b'-c|-m|FILE REV',
2769 )
2774 )
2770 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2775 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2771 """benchmark a unified diff between revisions
2776 """benchmark a unified diff between revisions
2772
2777
2773 This doesn't include any copy tracing - it's just a unified diff
2778 This doesn't include any copy tracing - it's just a unified diff
2774 of the texts.
2779 of the texts.
2775
2780
2776 By default, benchmark a diff between its delta parent and itself.
2781 By default, benchmark a diff between its delta parent and itself.
2777
2782
2778 With ``--count``, benchmark diffs between delta parents and self for N
2783 With ``--count``, benchmark diffs between delta parents and self for N
2779 revisions starting at the specified revision.
2784 revisions starting at the specified revision.
2780
2785
2781 With ``--alldata``, assume the requested revision is a changeset and
2786 With ``--alldata``, assume the requested revision is a changeset and
2782 measure diffs for all changes related to that changeset (manifest
2787 measure diffs for all changes related to that changeset (manifest
2783 and filelogs).
2788 and filelogs).
2784 """
2789 """
2785 opts = _byteskwargs(opts)
2790 opts = _byteskwargs(opts)
2786 if opts[b'alldata']:
2791 if opts[b'alldata']:
2787 opts[b'changelog'] = True
2792 opts[b'changelog'] = True
2788
2793
2789 if opts.get(b'changelog') or opts.get(b'manifest'):
2794 if opts.get(b'changelog') or opts.get(b'manifest'):
2790 file_, rev = None, file_
2795 file_, rev = None, file_
2791 elif rev is None:
2796 elif rev is None:
2792 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2797 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2793
2798
2794 textpairs = []
2799 textpairs = []
2795
2800
2796 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2801 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2797
2802
2798 startrev = r.rev(r.lookup(rev))
2803 startrev = r.rev(r.lookup(rev))
2799 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2804 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2800 if opts[b'alldata']:
2805 if opts[b'alldata']:
2801 # Load revisions associated with changeset.
2806 # Load revisions associated with changeset.
2802 ctx = repo[rev]
2807 ctx = repo[rev]
2803 mtext = _manifestrevision(repo, ctx.manifestnode())
2808 mtext = _manifestrevision(repo, ctx.manifestnode())
2804 for pctx in ctx.parents():
2809 for pctx in ctx.parents():
2805 pman = _manifestrevision(repo, pctx.manifestnode())
2810 pman = _manifestrevision(repo, pctx.manifestnode())
2806 textpairs.append((pman, mtext))
2811 textpairs.append((pman, mtext))
2807
2812
2808 # Load filelog revisions by iterating manifest delta.
2813 # Load filelog revisions by iterating manifest delta.
2809 man = ctx.manifest()
2814 man = ctx.manifest()
2810 pman = ctx.p1().manifest()
2815 pman = ctx.p1().manifest()
2811 for filename, change in pman.diff(man).items():
2816 for filename, change in pman.diff(man).items():
2812 fctx = repo.file(filename)
2817 fctx = repo.file(filename)
2813 f1 = fctx.revision(change[0][0] or -1)
2818 f1 = fctx.revision(change[0][0] or -1)
2814 f2 = fctx.revision(change[1][0] or -1)
2819 f2 = fctx.revision(change[1][0] or -1)
2815 textpairs.append((f1, f2))
2820 textpairs.append((f1, f2))
2816 else:
2821 else:
2817 dp = r.deltaparent(rev)
2822 dp = r.deltaparent(rev)
2818 textpairs.append((r.revision(dp), r.revision(rev)))
2823 textpairs.append((r.revision(dp), r.revision(rev)))
2819
2824
2820 def d():
2825 def d():
2821 for left, right in textpairs:
2826 for left, right in textpairs:
2822 # The date strings don't matter, so we pass empty strings.
2827 # The date strings don't matter, so we pass empty strings.
2823 headerlines, hunks = mdiff.unidiff(
2828 headerlines, hunks = mdiff.unidiff(
2824 left, b'', right, b'', b'left', b'right', binary=False
2829 left, b'', right, b'', b'left', b'right', binary=False
2825 )
2830 )
2826 # consume iterators in roughly the way patch.py does
2831 # consume iterators in roughly the way patch.py does
2827 b'\n'.join(headerlines)
2832 b'\n'.join(headerlines)
2828 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2833 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2829
2834
2830 timer, fm = gettimer(ui, opts)
2835 timer, fm = gettimer(ui, opts)
2831 timer(d)
2836 timer(d)
2832 fm.end()
2837 fm.end()
2833
2838
2834
2839
2835 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2840 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2836 def perfdiffwd(ui, repo, **opts):
2841 def perfdiffwd(ui, repo, **opts):
2837 """Profile diff of working directory changes"""
2842 """Profile diff of working directory changes"""
2838 opts = _byteskwargs(opts)
2843 opts = _byteskwargs(opts)
2839 timer, fm = gettimer(ui, opts)
2844 timer, fm = gettimer(ui, opts)
2840 options = {
2845 options = {
2841 'w': 'ignore_all_space',
2846 'w': 'ignore_all_space',
2842 'b': 'ignore_space_change',
2847 'b': 'ignore_space_change',
2843 'B': 'ignore_blank_lines',
2848 'B': 'ignore_blank_lines',
2844 }
2849 }
2845
2850
2846 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2851 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2847 opts = {options[c]: b'1' for c in diffopt}
2852 opts = {options[c]: b'1' for c in diffopt}
2848
2853
2849 def d():
2854 def d():
2850 ui.pushbuffer()
2855 ui.pushbuffer()
2851 commands.diff(ui, repo, **opts)
2856 commands.diff(ui, repo, **opts)
2852 ui.popbuffer()
2857 ui.popbuffer()
2853
2858
2854 diffopt = diffopt.encode('ascii')
2859 diffopt = diffopt.encode('ascii')
2855 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2860 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2856 timer(d, title=title)
2861 timer(d, title=title)
2857 fm.end()
2862 fm.end()
2858
2863
2859
2864
2860 @command(
2865 @command(
2861 b'perf::revlogindex|perfrevlogindex',
2866 b'perf::revlogindex|perfrevlogindex',
2862 revlogopts + formatteropts,
2867 revlogopts + formatteropts,
2863 b'-c|-m|FILE',
2868 b'-c|-m|FILE',
2864 )
2869 )
2865 def perfrevlogindex(ui, repo, file_=None, **opts):
2870 def perfrevlogindex(ui, repo, file_=None, **opts):
2866 """Benchmark operations against a revlog index.
2871 """Benchmark operations against a revlog index.
2867
2872
2868 This tests constructing a revlog instance, reading index data,
2873 This tests constructing a revlog instance, reading index data,
2869 parsing index data, and performing various operations related to
2874 parsing index data, and performing various operations related to
2870 index data.
2875 index data.
2871 """
2876 """
2872
2877
2873 opts = _byteskwargs(opts)
2878 opts = _byteskwargs(opts)
2874
2879
2875 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2880 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2876
2881
2877 opener = getattr(rl, 'opener') # trick linter
2882 opener = getattr(rl, 'opener') # trick linter
2878 # compat with hg <= 5.8
2883 # compat with hg <= 5.8
2879 radix = getattr(rl, 'radix', None)
2884 radix = getattr(rl, 'radix', None)
2880 indexfile = getattr(rl, '_indexfile', None)
2885 indexfile = getattr(rl, '_indexfile', None)
2881 if indexfile is None:
2886 if indexfile is None:
2882 # compatibility with <= hg-5.8
2887 # compatibility with <= hg-5.8
2883 indexfile = getattr(rl, 'indexfile')
2888 indexfile = getattr(rl, 'indexfile')
2884 data = opener.read(indexfile)
2889 data = opener.read(indexfile)
2885
2890
2886 header = struct.unpack(b'>I', data[0:4])[0]
2891 header = struct.unpack(b'>I', data[0:4])[0]
2887 version = header & 0xFFFF
2892 version = header & 0xFFFF
2888 if version == 1:
2893 if version == 1:
2889 inline = header & (1 << 16)
2894 inline = header & (1 << 16)
2890 else:
2895 else:
2891 raise error.Abort(b'unsupported revlog version: %d' % version)
2896 raise error.Abort(b'unsupported revlog version: %d' % version)
2892
2897
2893 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2898 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2894 if parse_index_v1 is None:
2899 if parse_index_v1 is None:
2895 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2900 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2896
2901
2897 rllen = len(rl)
2902 rllen = len(rl)
2898
2903
2899 node0 = rl.node(0)
2904 node0 = rl.node(0)
2900 node25 = rl.node(rllen // 4)
2905 node25 = rl.node(rllen // 4)
2901 node50 = rl.node(rllen // 2)
2906 node50 = rl.node(rllen // 2)
2902 node75 = rl.node(rllen // 4 * 3)
2907 node75 = rl.node(rllen // 4 * 3)
2903 node100 = rl.node(rllen - 1)
2908 node100 = rl.node(rllen - 1)
2904
2909
2905 allrevs = range(rllen)
2910 allrevs = range(rllen)
2906 allrevsrev = list(reversed(allrevs))
2911 allrevsrev = list(reversed(allrevs))
2907 allnodes = [rl.node(rev) for rev in range(rllen)]
2912 allnodes = [rl.node(rev) for rev in range(rllen)]
2908 allnodesrev = list(reversed(allnodes))
2913 allnodesrev = list(reversed(allnodes))
2909
2914
2910 def constructor():
2915 def constructor():
2911 if radix is not None:
2916 if radix is not None:
2912 revlog(opener, radix=radix)
2917 revlog(opener, radix=radix)
2913 else:
2918 else:
2914 # hg <= 5.8
2919 # hg <= 5.8
2915 revlog(opener, indexfile=indexfile)
2920 revlog(opener, indexfile=indexfile)
2916
2921
2917 def read():
2922 def read():
2918 with opener(indexfile) as fh:
2923 with opener(indexfile) as fh:
2919 fh.read()
2924 fh.read()
2920
2925
2921 def parseindex():
2926 def parseindex():
2922 parse_index_v1(data, inline)
2927 parse_index_v1(data, inline)
2923
2928
2924 def getentry(revornode):
2929 def getentry(revornode):
2925 index = parse_index_v1(data, inline)[0]
2930 index = parse_index_v1(data, inline)[0]
2926 index[revornode]
2931 index[revornode]
2927
2932
2928 def getentries(revs, count=1):
2933 def getentries(revs, count=1):
2929 index = parse_index_v1(data, inline)[0]
2934 index = parse_index_v1(data, inline)[0]
2930
2935
2931 for i in range(count):
2936 for i in range(count):
2932 for rev in revs:
2937 for rev in revs:
2933 index[rev]
2938 index[rev]
2934
2939
2935 def resolvenode(node):
2940 def resolvenode(node):
2936 index = parse_index_v1(data, inline)[0]
2941 index = parse_index_v1(data, inline)[0]
2937 rev = getattr(index, 'rev', None)
2942 rev = getattr(index, 'rev', None)
2938 if rev is None:
2943 if rev is None:
2939 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2944 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2940 # This only works for the C code.
2945 # This only works for the C code.
2941 if nodemap is None:
2946 if nodemap is None:
2942 return
2947 return
2943 rev = nodemap.__getitem__
2948 rev = nodemap.__getitem__
2944
2949
2945 try:
2950 try:
2946 rev(node)
2951 rev(node)
2947 except error.RevlogError:
2952 except error.RevlogError:
2948 pass
2953 pass
2949
2954
2950 def resolvenodes(nodes, count=1):
2955 def resolvenodes(nodes, count=1):
2951 index = parse_index_v1(data, inline)[0]
2956 index = parse_index_v1(data, inline)[0]
2952 rev = getattr(index, 'rev', None)
2957 rev = getattr(index, 'rev', None)
2953 if rev is None:
2958 if rev is None:
2954 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2959 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2955 # This only works for the C code.
2960 # This only works for the C code.
2956 if nodemap is None:
2961 if nodemap is None:
2957 return
2962 return
2958 rev = nodemap.__getitem__
2963 rev = nodemap.__getitem__
2959
2964
2960 for i in range(count):
2965 for i in range(count):
2961 for node in nodes:
2966 for node in nodes:
2962 try:
2967 try:
2963 rev(node)
2968 rev(node)
2964 except error.RevlogError:
2969 except error.RevlogError:
2965 pass
2970 pass
2966
2971
2967 benches = [
2972 benches = [
2968 (constructor, b'revlog constructor'),
2973 (constructor, b'revlog constructor'),
2969 (read, b'read'),
2974 (read, b'read'),
2970 (parseindex, b'create index object'),
2975 (parseindex, b'create index object'),
2971 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2976 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2972 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2977 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2973 (lambda: resolvenode(node0), b'look up node at rev 0'),
2978 (lambda: resolvenode(node0), b'look up node at rev 0'),
2974 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2979 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2975 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2980 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2976 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2981 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2977 (lambda: resolvenode(node100), b'look up node at tip'),
2982 (lambda: resolvenode(node100), b'look up node at tip'),
2978 # 2x variation is to measure caching impact.
2983 # 2x variation is to measure caching impact.
2979 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2984 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2980 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2985 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2981 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2986 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2982 (
2987 (
2983 lambda: resolvenodes(allnodesrev, 2),
2988 lambda: resolvenodes(allnodesrev, 2),
2984 b'look up all nodes 2x (reverse)',
2989 b'look up all nodes 2x (reverse)',
2985 ),
2990 ),
2986 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2991 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2987 (
2992 (
2988 lambda: getentries(allrevs, 2),
2993 lambda: getentries(allrevs, 2),
2989 b'retrieve all index entries 2x (forward)',
2994 b'retrieve all index entries 2x (forward)',
2990 ),
2995 ),
2991 (
2996 (
2992 lambda: getentries(allrevsrev),
2997 lambda: getentries(allrevsrev),
2993 b'retrieve all index entries (reverse)',
2998 b'retrieve all index entries (reverse)',
2994 ),
2999 ),
2995 (
3000 (
2996 lambda: getentries(allrevsrev, 2),
3001 lambda: getentries(allrevsrev, 2),
2997 b'retrieve all index entries 2x (reverse)',
3002 b'retrieve all index entries 2x (reverse)',
2998 ),
3003 ),
2999 ]
3004 ]
3000
3005
3001 for fn, title in benches:
3006 for fn, title in benches:
3002 timer, fm = gettimer(ui, opts)
3007 timer, fm = gettimer(ui, opts)
3003 timer(fn, title=title)
3008 timer(fn, title=title)
3004 fm.end()
3009 fm.end()
3005
3010
3006
3011
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # A negative start revision counts backward from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # Drop any cached chunks/fulltexts so every run pays the full cost.
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            # Walk from tip down to the start revision instead.
            first, last = last - 1, first - 1
            step = -step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3055
3060
3056
3061
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count backward from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: error message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # Each pass produces [(rev, timing), ...]; merge the passes so each
    # revision maps to the list of timings across all passes.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: was `resultcount * 70 // 100`, inconsistent with the label
        # and with every other percentile row in this table
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3198
3203
3199
3204
3200 class _faketr:
3205 class _faketr:
3201 def add(s, x, y, z=None):
3206 def add(s, x, y, z=None):
3202 return None
3207 return None
3203
3208
3204
3209
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions ``startrev``..``stoprev`` of ``orig`` to a truncated
    temporary copy, timing each ``addrawrevision`` call individually.

    Returns a list of ``(rev, timing)`` pairs, one per re-added revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            # Bind the modern progress methods directly.
            updateprogress = progress.update
            completeprogress = progress.complete
        else:
            # Legacy ui.progress() API (pre-makeprogress Mercurial).
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # Make every addition pay the full (cold-cache) cost.
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3253 return timings
3258 return timings
3254
3259
3255
3260
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for ``addrawrevision`` that
    reproduces revision ``rev`` of ``orig``.

    Depending on ``source``, the content is supplied either as a full
    text or as a cached delta against one of the parents (or against
    whatever base the revlog currently stores).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to the first parent when there is no second one.
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # Start from the p1 delta and switch to p2 if it is smaller.
        parent = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent = p2
                diff = p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3296
3301
3297
3302
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable temporary copy of ``orig`` truncated at
    ``truncaterev``, suitable for re-adding revisions during benchmarks.

    The copy lives in a throwaway directory that is removed on exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # fix: the previous form `getattr(orig, '_datafile', getattr(orig,
    # 'datafile'))` evaluated the fallback eagerly, raising AttributeError
    # on revlogs that only define `_datafile`. Use the same lazy two-step
    # lookup as for the index file above.
    datafile = getattr(orig, '_datafile', None)
    if datafile is None:
        # compatibility with <= hg-5.8
        datafile = getattr(orig, 'datafile')
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # Modern revlog constructor (radix-based naming).
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # compatibility with <= hg-5.8 (explicit file names)
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # Best-effort cleanup (ignore_errors=True).
        shutil.rmtree(tmpdir, True)
3358
3363
3359
3364
3360 @command(
3365 @command(
3361 b'perf::revlogchunks|perfrevlogchunks',
3366 b'perf::revlogchunks|perfrevlogchunks',
3362 revlogopts
3367 revlogopts
3363 + formatteropts
3368 + formatteropts
3364 + [
3369 + [
3365 (b'e', b'engines', b'', b'compression engines to use'),
3370 (b'e', b'engines', b'', b'compression engines to use'),
3366 (b's', b'startrev', 0, b'revision to start at'),
3371 (b's', b'startrev', 0, b'revision to start at'),
3367 ],
3372 ],
3368 b'-c|-m|FILE',
3373 b'-c|-m|FILE',
3369 )
3374 )
3370 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3375 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3371 """Benchmark operations on revlog chunks.
3376 """Benchmark operations on revlog chunks.
3372
3377
3373 Logically, each revlog is a collection of fulltext revisions. However,
3378 Logically, each revlog is a collection of fulltext revisions. However,
3374 stored within each revlog are "chunks" of possibly compressed data. This
3379 stored within each revlog are "chunks" of possibly compressed data. This
3375 data needs to be read and decompressed or compressed and written.
3380 data needs to be read and decompressed or compressed and written.
3376
3381
3377 This command measures the time it takes to read+decompress and recompress
3382 This command measures the time it takes to read+decompress and recompress
3378 chunks in a revlog. It effectively isolates I/O and compression performance.
3383 chunks in a revlog. It effectively isolates I/O and compression performance.
3379 For measurements of higher-level operations like resolving revisions,
3384 For measurements of higher-level operations like resolving revisions,
3380 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3385 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3381 """
3386 """
3382 opts = _byteskwargs(opts)
3387 opts = _byteskwargs(opts)
3383
3388
3384 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3389 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3385
3390
3386 # _chunkraw was renamed to _getsegmentforrevs.
3391 # _chunkraw was renamed to _getsegmentforrevs.
3387 try:
3392 try:
3388 segmentforrevs = rl._getsegmentforrevs
3393 segmentforrevs = rl._getsegmentforrevs
3389 except AttributeError:
3394 except AttributeError:
3390 segmentforrevs = rl._chunkraw
3395 segmentforrevs = rl._chunkraw
3391
3396
3392 # Verify engines argument.
3397 # Verify engines argument.
3393 if engines:
3398 if engines:
3394 engines = {e.strip() for e in engines.split(b',')}
3399 engines = {e.strip() for e in engines.split(b',')}
3395 for engine in engines:
3400 for engine in engines:
3396 try:
3401 try:
3397 util.compressionengines[engine]
3402 util.compressionengines[engine]
3398 except KeyError:
3403 except KeyError:
3399 raise error.Abort(b'unknown compression engine: %s' % engine)
3404 raise error.Abort(b'unknown compression engine: %s' % engine)
3400 else:
3405 else:
3401 engines = []
3406 engines = []
3402 for e in util.compengines:
3407 for e in util.compengines:
3403 engine = util.compengines[e]
3408 engine = util.compengines[e]
3404 try:
3409 try:
3405 if engine.available():
3410 if engine.available():
3406 engine.revlogcompressor().compress(b'dummy')
3411 engine.revlogcompressor().compress(b'dummy')
3407 engines.append(e)
3412 engines.append(e)
3408 except NotImplementedError:
3413 except NotImplementedError:
3409 pass
3414 pass
3410
3415
3411 revs = list(rl.revs(startrev, len(rl) - 1))
3416 revs = list(rl.revs(startrev, len(rl) - 1))
3412
3417
3413 def rlfh(rl):
3418 def rlfh(rl):
3414 if rl._inline:
3419 if rl._inline:
3415 indexfile = getattr(rl, '_indexfile', None)
3420 indexfile = getattr(rl, '_indexfile', None)
3416 if indexfile is None:
3421 if indexfile is None:
3417 # compatibility with <= hg-5.8
3422 # compatibility with <= hg-5.8
3418 indexfile = getattr(rl, 'indexfile')
3423 indexfile = getattr(rl, 'indexfile')
3419 return getsvfs(repo)(indexfile)
3424 return getsvfs(repo)(indexfile)
3420 else:
3425 else:
3421 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3426 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3422 return getsvfs(repo)(datafile)
3427 return getsvfs(repo)(datafile)
3423
3428
3424 def doread():
3429 def doread():
3425 rl.clearcaches()
3430 rl.clearcaches()
3426 for rev in revs:
3431 for rev in revs:
3427 segmentforrevs(rev, rev)
3432 segmentforrevs(rev, rev)
3428
3433
3429 def doreadcachedfh():
3434 def doreadcachedfh():
3430 rl.clearcaches()
3435 rl.clearcaches()
3431 fh = rlfh(rl)
3436 fh = rlfh(rl)
3432 for rev in revs:
3437 for rev in revs:
3433 segmentforrevs(rev, rev, df=fh)
3438 segmentforrevs(rev, rev, df=fh)
3434
3439
3435 def doreadbatch():
3440 def doreadbatch():
3436 rl.clearcaches()
3441 rl.clearcaches()
3437 segmentforrevs(revs[0], revs[-1])
3442 segmentforrevs(revs[0], revs[-1])
3438
3443
3439 def doreadbatchcachedfh():
3444 def doreadbatchcachedfh():
3440 rl.clearcaches()
3445 rl.clearcaches()
3441 fh = rlfh(rl)
3446 fh = rlfh(rl)
3442 segmentforrevs(revs[0], revs[-1], df=fh)
3447 segmentforrevs(revs[0], revs[-1], df=fh)
3443
3448
3444 def dochunk():
3449 def dochunk():
3445 rl.clearcaches()
3450 rl.clearcaches()
3446 fh = rlfh(rl)
3451 fh = rlfh(rl)
3447 for rev in revs:
3452 for rev in revs:
3448 rl._chunk(rev, df=fh)
3453 rl._chunk(rev, df=fh)
3449
3454
3450 chunks = [None]
3455 chunks = [None]
3451
3456
3452 def dochunkbatch():
3457 def dochunkbatch():
3453 rl.clearcaches()
3458 rl.clearcaches()
3454 fh = rlfh(rl)
3459 fh = rlfh(rl)
3455 # Save chunks as a side-effect.
3460 # Save chunks as a side-effect.
3456 chunks[0] = rl._chunks(revs, df=fh)
3461 chunks[0] = rl._chunks(revs, df=fh)
3457
3462
3458 def docompress(compressor):
3463 def docompress(compressor):
3459 rl.clearcaches()
3464 rl.clearcaches()
3460
3465
3461 try:
3466 try:
3462 # Swap in the requested compression engine.
3467 # Swap in the requested compression engine.
3463 oldcompressor = rl._compressor
3468 oldcompressor = rl._compressor
3464 rl._compressor = compressor
3469 rl._compressor = compressor
3465 for chunk in chunks[0]:
3470 for chunk in chunks[0]:
3466 rl.compress(chunk)
3471 rl.compress(chunk)
3467 finally:
3472 finally:
3468 rl._compressor = oldcompressor
3473 rl._compressor = oldcompressor
3469
3474
3470 benches = [
3475 benches = [
3471 (lambda: doread(), b'read'),
3476 (lambda: doread(), b'read'),
3472 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3477 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3473 (lambda: doreadbatch(), b'read batch'),
3478 (lambda: doreadbatch(), b'read batch'),
3474 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3479 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3475 (lambda: dochunk(), b'chunk'),
3480 (lambda: dochunk(), b'chunk'),
3476 (lambda: dochunkbatch(), b'chunk batch'),
3481 (lambda: dochunkbatch(), b'chunk batch'),
3477 ]
3482 ]
3478
3483
3479 for engine in sorted(engines):
3484 for engine in sorted(engines):
3480 compressor = util.compengines[engine].revlogcompressor()
3485 compressor = util.compengines[engine].revlogcompressor()
3481 benches.append(
3486 benches.append(
3482 (
3487 (
3483 functools.partial(docompress, compressor),
3488 functools.partial(docompress, compressor),
3484 b'compress w/ %s' % engine,
3489 b'compress w/ %s' % engine,
3485 )
3490 )
3486 )
3491 )
3487
3492
3488 for fn, title in benches:
3493 for fn, title in benches:
3489 timer, fm = gettimer(ui, opts)
3494 timer, fm = gettimer(ui, opts)
3490 timer(fn, title=title)
3495 timer(fn, title=title)
3491 fm.end()
3496 fm.end()
3492
3497
3493
3498
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # -c/-m benchmark the changelog/manifest; the positional FILE slot then
    # actually carries the revision argument.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Extract the compressed chunk for every rev of every chain segment
        # from the already-read segment buffers in `data`.
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # compatibility with older revlog API
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # Each do* helper below benchmarks one phase; with --cache the revlog
    # caches are kept between runs instead of being cleared.
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # compatibility with older Mercurial versions
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the inputs of each phase once so the timed helpers only
    # measure their own phase.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # slicing is only meaningful when sparse read is enabled
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3638
3643
3639
3644
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revisions set caches on the revset execution. Volatile caches
    hold filtered and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop the volatile (filtered/obsolescence) caches so that each
            # run recomputes them
            repo.invalidatevolatilesets()
        if contexts:
            # also include the cost of building a changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3671
3676
3672
3677
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def obs_bench(setname):
        """build a benchmark callable for the obsolescence set `setname`"""

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, setname)

        return run

    # benchmark every known obsolescence set, restricted to `names` if given
    for setname in sorted(obsolete.cachefuncs):
        if names and setname not in names:
            continue
        timer(obs_bench(setname), title=setname)

    def filter_bench(filtername):
        """build a benchmark callable for the repoview filter `filtername`"""

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, filtername)

        return run

    # benchmark every registered repoview filter, restricted to `names`
    for filtername in sorted(repoview.filtertable):
        if names and filtername not in names:
            continue
        timer(filter_bench(filtername), title=filtername)
    fm.end()
3720
3725
3721
3726
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measures a full rebuild
                view._branchcaches.clear()
            else:
                # only drop this filter's entry so the subset's map is reused
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so that
        # `allfilters` ends up ordered from smaller to bigger subsets
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so only in-memory computation is
    # measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3811
3816
3812
3817
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    $ update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmark will actually add to the branchmap
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # temporary repoview filters exposing exactly the base/target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the temporary filters; unregistered in the finally block
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3921
3926
3922
3927
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list only enumerates the on-disk branchmap cache files; no timing
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # subset chain until a cached branchmap is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3981
3986
3982
3987
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)

    def loadmarkers():
        # instantiating the obsstore parses every marker from disk
        return len(obsolete.obsstore(repo, storevfs))

    timer(loadmarkers)
    fm.end()
3992
3997
3993
3998
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    # Benchmark util.lrucachedict in several access patterns: construction,
    # pure lookups, pure insertions, and a randomized get/set mix.  When
    # --costlimit is non-zero, the cost-aware insert() API is exercised
    # instead of plain __setitem__/__getitem__.
    opts = _byteskwargs(opts)

    def doinit():
        # Measure raw construction cost of the cache object itself.
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # One random value per cache slot; these seed the cache for "get" mode.
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # Cost-aware variant: entries may be evicted to respect the cost
        # ceiling, so a lookup can legitimately miss (hence the KeyError
        # guard).  `costs` is bound at call time, after its definition below.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # Same workload as doinserts() but through __setitem__ rather than
        # insert(), so the two code paths can be compared.
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # lookup
        else:
            op = 1  # insertion

        # Keys span 0..2*size so roughly half the lookups miss and
        # insertions regularly trigger eviction.
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # The cost-aware and plain benchmarks are mutually exclusive: the
    # cost-limit API only makes sense when a limit is configured.
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    # Each benchmark gets its own timer/formatter pair so results are
    # reported per-title.
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4148
4153
4149
4154
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # Resolve the ui method to benchmark once, outside the timed loop.
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    # In batch mode the whole line is built once up front and emitted with a
    # single write() call per line.
    full_line = item * nitems + b'\n' if batch_line else None

    def benchmark():
        for _lineno in pycompat.xrange(nlines):
            if batch_line:
                write(full_line)
            else:
                # One write() call per item, then the newline separately.
                for _col in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4191
4196
4192
4197
def uisetup(ui):
    # Extension setup hook: on historical Mercurial versions (1.9 - 3.7,
    # where cmdutil.openrevlog exists but commands.debugrevlogopts does
    # not), wrap openrevlog so that the unsupported --dir option fails
    # loudly instead of being silently ignored.
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # repo.dirlog appearing is the marker that --dir is supported;
            # abort with an upgrade hint otherwise.
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4211
4216
4212
4217
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def drive_progress():
        # A fresh progress bar per run; the context manager finalizes it
        # so successive timer iterations start clean.
        with ui.makeprogress(topic, total=total) as progress:
            for _step in _xrange(total):
                progress.increment()

    timer(drive_progress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now