perf-bundle: accept --rev arguments...
Author: marmoute
Changeset: r50307:3635aae8 (default branch)
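The change below lets `perf::bundle` take revisions either as positional REVS
arguments or through a new `-r`/`--rev` option; both are combined and resolved
with `scmutil.revrange`. Example invocations (the revset is illustrative, and
the perf extension must be enabled):

    $ hg perf::bundle --rev 'last(10)'
    $ hg perf::bundle 'last(10)'        # equivalent positional form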
@@ -1,4029 +1,4044 @@
1 # perf.py - performance test routines
2 '''helper extension to measure performance
3
4 Configurations
5 ==============
6
7 ``perf``
8 --------
9
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median, and average. If not set, only the best timing is reported
13 (default: off).
14
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
17
18 ``pre-run``
19 number of runs to perform before starting measurement.
20
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
24
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
29
30 If the benchmark has been running for <time> seconds and we have performed
31 <numberofrun> iterations, stop the benchmark.
32
33 The default value is: `3.0-100, 10.0-3`
34
35 ``stub``
36 When set, benchmarks will only be run once; useful for testing
37 (default: off)
38 '''
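# A minimal example of the configuration described above, as it could appear
# in an hgrc file (the values shown are illustrative, not defaults):
#
#   [perf]
#   all-timing = yes
#   presleep = 0
#   pre-run = 1
#   run-limits = 5.0-50, 15.0-5
#   stub = no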
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
138 def identity(a):
138 def identity(a):
139 return a
139 return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
192 def safehasattr(thing, attr):
192 def safehasattr(thing, attr):
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238 # for "historical portability":
238 # for "historical portability":
239 # define parsealiases locally, because cmdutil.parsealiases has been
239 # define parsealiases locally, because cmdutil.parsealiases has been
240 # available since 1.5 (or 6252852b4332)
240 # available since 1.5 (or 6252852b4332)
241 def parsealiases(cmd):
241 def parsealiases(cmd):
242 return cmd.split(b"|")
242 return cmd.split(b"|")
243
243
244
244
245 if safehasattr(registrar, 'command'):
245 if safehasattr(registrar, 'command'):
246 command = registrar.command(cmdtable)
246 command = registrar.command(cmdtable)
247 elif safehasattr(cmdutil, 'command'):
247 elif safehasattr(cmdutil, 'command'):
248 command = cmdutil.command(cmdtable)
248 command = cmdutil.command(cmdtable)
249 if 'norepo' not in getargspec(command).args:
249 if 'norepo' not in getargspec(command).args:
250 # for "historical portability":
250 # for "historical portability":
251 # wrap original cmdutil.command, because "norepo" option has
251 # wrap original cmdutil.command, because "norepo" option has
252 # been available since 3.1 (or 75a96326cecb)
252 # been available since 3.1 (or 75a96326cecb)
253 _command = command
253 _command = command
254
254
255 def command(name, options=(), synopsis=None, norepo=False):
255 def command(name, options=(), synopsis=None, norepo=False):
256 if norepo:
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return _command(name, list(options), synopsis)
258 return _command(name, list(options), synopsis)
259
259
260
260
261 else:
261 else:
262 # for "historical portability":
262 # for "historical portability":
263 # define "@command" annotation locally, because cmdutil.command
263 # define "@command" annotation locally, because cmdutil.command
264 # has been available since 1.9 (or 2daa5179e73f)
264 # has been available since 1.9 (or 2daa5179e73f)
265 def command(name, options=(), synopsis=None, norepo=False):
265 def command(name, options=(), synopsis=None, norepo=False):
266 def decorator(func):
266 def decorator(func):
267 if synopsis:
267 if synopsis:
268 cmdtable[name] = func, list(options), synopsis
268 cmdtable[name] = func, list(options), synopsis
269 else:
269 else:
270 cmdtable[name] = func, list(options)
270 cmdtable[name] = func, list(options)
271 if norepo:
271 if norepo:
272 commands.norepo += b' %s' % b' '.join(parsealiases(name))
272 commands.norepo += b' %s' % b' '.join(parsealiases(name))
273 return func
273 return func
274
274
275 return decorator
275 return decorator
276
276
277
277
278 try:
278 try:
279 import mercurial.registrar
279 import mercurial.registrar
280 import mercurial.configitems
280 import mercurial.configitems
281
281
282 configtable = {}
282 configtable = {}
283 configitem = mercurial.registrar.configitem(configtable)
283 configitem = mercurial.registrar.configitem(configtable)
284 configitem(
284 configitem(
285 b'perf',
285 b'perf',
286 b'presleep',
286 b'presleep',
287 default=mercurial.configitems.dynamicdefault,
287 default=mercurial.configitems.dynamicdefault,
288 experimental=True,
288 experimental=True,
289 )
289 )
290 configitem(
290 configitem(
291 b'perf',
291 b'perf',
292 b'stub',
292 b'stub',
293 default=mercurial.configitems.dynamicdefault,
293 default=mercurial.configitems.dynamicdefault,
294 experimental=True,
294 experimental=True,
295 )
295 )
296 configitem(
296 configitem(
297 b'perf',
297 b'perf',
298 b'parentscount',
298 b'parentscount',
299 default=mercurial.configitems.dynamicdefault,
299 default=mercurial.configitems.dynamicdefault,
300 experimental=True,
300 experimental=True,
301 )
301 )
302 configitem(
302 configitem(
303 b'perf',
303 b'perf',
304 b'all-timing',
304 b'all-timing',
305 default=mercurial.configitems.dynamicdefault,
305 default=mercurial.configitems.dynamicdefault,
306 experimental=True,
306 experimental=True,
307 )
307 )
308 configitem(
308 configitem(
309 b'perf',
309 b'perf',
310 b'pre-run',
310 b'pre-run',
311 default=mercurial.configitems.dynamicdefault,
311 default=mercurial.configitems.dynamicdefault,
312 )
312 )
313 configitem(
313 configitem(
314 b'perf',
314 b'perf',
315 b'profile-benchmark',
315 b'profile-benchmark',
316 default=mercurial.configitems.dynamicdefault,
316 default=mercurial.configitems.dynamicdefault,
317 )
317 )
318 configitem(
318 configitem(
319 b'perf',
319 b'perf',
320 b'run-limits',
320 b'run-limits',
321 default=mercurial.configitems.dynamicdefault,
321 default=mercurial.configitems.dynamicdefault,
322 experimental=True,
322 experimental=True,
323 )
323 )
324 except (ImportError, AttributeError):
324 except (ImportError, AttributeError):
325 pass
325 pass
326 except TypeError:
326 except TypeError:
327 # compatibility fix for a11fd395e83f
327 # compatibility fix for a11fd395e83f
328 # hg version: 5.2
328 # hg version: 5.2
329 configitem(
329 configitem(
330 b'perf',
330 b'perf',
331 b'presleep',
331 b'presleep',
332 default=mercurial.configitems.dynamicdefault,
332 default=mercurial.configitems.dynamicdefault,
333 )
333 )
334 configitem(
334 configitem(
335 b'perf',
335 b'perf',
336 b'stub',
336 b'stub',
337 default=mercurial.configitems.dynamicdefault,
337 default=mercurial.configitems.dynamicdefault,
338 )
338 )
339 configitem(
339 configitem(
340 b'perf',
340 b'perf',
341 b'parentscount',
341 b'parentscount',
342 default=mercurial.configitems.dynamicdefault,
342 default=mercurial.configitems.dynamicdefault,
343 )
343 )
344 configitem(
344 configitem(
345 b'perf',
345 b'perf',
346 b'all-timing',
346 b'all-timing',
347 default=mercurial.configitems.dynamicdefault,
347 default=mercurial.configitems.dynamicdefault,
348 )
348 )
349 configitem(
349 configitem(
350 b'perf',
350 b'perf',
351 b'pre-run',
351 b'pre-run',
352 default=mercurial.configitems.dynamicdefault,
352 default=mercurial.configitems.dynamicdefault,
353 )
353 )
354 configitem(
354 configitem(
355 b'perf',
355 b'perf',
356 b'profile-benchmark',
356 b'profile-benchmark',
357 default=mercurial.configitems.dynamicdefault,
357 default=mercurial.configitems.dynamicdefault,
358 )
358 )
359 configitem(
359 configitem(
360 b'perf',
360 b'perf',
361 b'run-limits',
361 b'run-limits',
362 default=mercurial.configitems.dynamicdefault,
362 default=mercurial.configitems.dynamicdefault,
363 )
363 )
364
364
365
365
366 def getlen(ui):
366 def getlen(ui):
367 if ui.configbool(b"perf", b"stub", False):
367 if ui.configbool(b"perf", b"stub", False):
368 return lambda x: 1
368 return lambda x: 1
369 return len
369 return len
370
370
371
371
372 class noop:
372 class noop:
373 """dummy context manager"""
373 """dummy context manager"""
374
374
375 def __enter__(self):
375 def __enter__(self):
376 pass
376 pass
377
377
378 def __exit__(self, *args):
378 def __exit__(self, *args):
379 pass
379 pass
380
380
381
381
382 NOOPCTX = noop()
382 NOOPCTX = noop()
383
383
384
384
385 def gettimer(ui, opts=None):
385 def gettimer(ui, opts=None):
386 """return a timer function and formatter: (timer, formatter)
386 """return a timer function and formatter: (timer, formatter)
387
387
388 This function exists to gather the creation of formatter in a single
388 This function exists to gather the creation of formatter in a single
389 place instead of duplicating it in all performance commands."""
389 place instead of duplicating it in all performance commands."""
390
390
391 # enforce an idle period before execution to counteract power management
391 # enforce an idle period before execution to counteract power management
392 # experimental config: perf.presleep
392 # experimental config: perf.presleep
393 time.sleep(getint(ui, b"perf", b"presleep", 1))
393 time.sleep(getint(ui, b"perf", b"presleep", 1))
394
394
395 if opts is None:
395 if opts is None:
396 opts = {}
396 opts = {}
397 # redirect all to stderr unless buffer api is in use
397 # redirect all to stderr unless buffer api is in use
398 if not ui._buffers:
398 if not ui._buffers:
399 ui = ui.copy()
399 ui = ui.copy()
400 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
400 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
401 if uifout:
401 if uifout:
402 # for "historical portability":
402 # for "historical portability":
403 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
403 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
404 uifout.set(ui.ferr)
404 uifout.set(ui.ferr)
405
405
406 # get a formatter
406 # get a formatter
407 uiformatter = getattr(ui, 'formatter', None)
407 uiformatter = getattr(ui, 'formatter', None)
408 if uiformatter:
408 if uiformatter:
409 fm = uiformatter(b'perf', opts)
409 fm = uiformatter(b'perf', opts)
410 else:
410 else:
411 # for "historical portability":
411 # for "historical portability":
412 # define formatter locally, because ui.formatter has been
412 # define formatter locally, because ui.formatter has been
413 # available since 2.2 (or ae5f92e154d3)
413 # available since 2.2 (or ae5f92e154d3)
414 from mercurial import node
414 from mercurial import node
415
415
416 class defaultformatter:
416 class defaultformatter:
417 """Minimized composition of baseformatter and plainformatter"""
417 """Minimized composition of baseformatter and plainformatter"""
418
418
419 def __init__(self, ui, topic, opts):
419 def __init__(self, ui, topic, opts):
420 self._ui = ui
420 self._ui = ui
421 if ui.debugflag:
421 if ui.debugflag:
422 self.hexfunc = node.hex
422 self.hexfunc = node.hex
423 else:
423 else:
424 self.hexfunc = node.short
424 self.hexfunc = node.short
425
425
426 def __nonzero__(self):
426 def __nonzero__(self):
427 return False
427 return False
428
428
429 __bool__ = __nonzero__
429 __bool__ = __nonzero__
430
430
431 def startitem(self):
431 def startitem(self):
432 pass
432 pass
433
433
434 def data(self, **data):
434 def data(self, **data):
435 pass
435 pass
436
436
437 def write(self, fields, deftext, *fielddata, **opts):
437 def write(self, fields, deftext, *fielddata, **opts):
438 self._ui.write(deftext % fielddata, **opts)
438 self._ui.write(deftext % fielddata, **opts)
439
439
440 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
440 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
441 if cond:
441 if cond:
442 self._ui.write(deftext % fielddata, **opts)
442 self._ui.write(deftext % fielddata, **opts)
443
443
444 def plain(self, text, **opts):
444 def plain(self, text, **opts):
445 self._ui.write(text, **opts)
445 self._ui.write(text, **opts)
446
446
447 def end(self):
447 def end(self):
448 pass
448 pass
449
449
450 fm = defaultformatter(ui, b'perf', opts)
450 fm = defaultformatter(ui, b'perf', opts)
451
451
452 # stub function, runs code only once instead of in a loop
452 # stub function, runs code only once instead of in a loop
453 # experimental config: perf.stub
453 # experimental config: perf.stub
454 if ui.configbool(b"perf", b"stub", False):
454 if ui.configbool(b"perf", b"stub", False):
455 return functools.partial(stub_timer, fm), fm
455 return functools.partial(stub_timer, fm), fm
456
456
457 # experimental config: perf.all-timing
457 # experimental config: perf.all-timing
458 displayall = ui.configbool(b"perf", b"all-timing", False)
458 displayall = ui.configbool(b"perf", b"all-timing", False)
459
459
460 # experimental config: perf.run-limits
460 # experimental config: perf.run-limits
461 limitspec = ui.configlist(b"perf", b"run-limits", [])
461 limitspec = ui.configlist(b"perf", b"run-limits", [])
462 limits = []
462 limits = []
463 for item in limitspec:
463 for item in limitspec:
464 parts = item.split(b'-', 1)
464 parts = item.split(b'-', 1)
465 if len(parts) < 2:
465 if len(parts) < 2:
466 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
466 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
467 continue
467 continue
468 try:
468 try:
469 time_limit = float(_sysstr(parts[0]))
469 time_limit = float(_sysstr(parts[0]))
470 except ValueError as e:
470 except ValueError as e:
471 ui.warn(
471 ui.warn(
472 (
472 (
473 b'malformatted run limit entry, %s: %s\n'
473 b'malformatted run limit entry, %s: %s\n'
474 % (_bytestr(e), item)
474 % (_bytestr(e), item)
475 )
475 )
476 )
476 )
477 continue
477 continue
478 try:
478 try:
479 run_limit = int(_sysstr(parts[1]))
479 run_limit = int(_sysstr(parts[1]))
480 except ValueError as e:
480 except ValueError as e:
481 ui.warn(
481 ui.warn(
482 (
482 (
483 b'malformatted run limit entry, %s: %s\n'
483 b'malformatted run limit entry, %s: %s\n'
484 % (_bytestr(e), item)
484 % (_bytestr(e), item)
485 )
485 )
486 )
486 )
487 continue
487 continue
488 limits.append((time_limit, run_limit))
488 limits.append((time_limit, run_limit))
489 if not limits:
489 if not limits:
490 limits = DEFAULTLIMITS
490 limits = DEFAULTLIMITS
491
491
492 profiler = None
492 profiler = None
493 if profiling is not None:
493 if profiling is not None:
494 if ui.configbool(b"perf", b"profile-benchmark", False):
494 if ui.configbool(b"perf", b"profile-benchmark", False):
495 profiler = profiling.profile(ui)
495 profiler = profiling.profile(ui)
496
496
497 prerun = getint(ui, b"perf", b"pre-run", 0)
497 prerun = getint(ui, b"perf", b"pre-run", 0)
498 t = functools.partial(
498 t = functools.partial(
499 _timer,
499 _timer,
500 fm,
500 fm,
501 displayall=displayall,
501 displayall=displayall,
502 limits=limits,
502 limits=limits,
503 prerun=prerun,
503 prerun=prerun,
504 profiler=profiler,
504 profiler=profiler,
505 )
505 )
506 return t, fm
506 return t, fm
507
507
508
508
509 def stub_timer(fm, func, setup=None, title=None):
509 def stub_timer(fm, func, setup=None, title=None):
510 if setup is not None:
510 if setup is not None:
511 setup()
511 setup()
512 func()
512 func()
513
513
514
514
515 @contextlib.contextmanager
515 @contextlib.contextmanager
516 def timeone():
516 def timeone():
517 r = []
517 r = []
518 ostart = os.times()
518 ostart = os.times()
519 cstart = util.timer()
519 cstart = util.timer()
520 yield r
520 yield r
521 cstop = util.timer()
521 cstop = util.timer()
522 ostop = os.times()
522 ostop = os.times()
523 a, b = ostart, ostop
523 a, b = ostart, ostop
524 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
524 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
525
525
526
526
527 # list of stop condition (elapsed time, minimal run count)
527 # list of stop condition (elapsed time, minimal run count)
528 DEFAULTLIMITS = (
528 DEFAULTLIMITS = (
529 (3.0, 100),
529 (3.0, 100),
530 (10.0, 3),
530 (10.0, 3),
531 )
531 )
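# Worked example, assuming the defaults above: a benchmark taking ~1ms per run
# keeps going until ~3.0s have elapsed (well over 100 runs), because a limit
# only triggers once both its elapsed time and its run count are reached; a
# benchmark taking ~5s per run is stopped by the (10.0, 3) pair after its
# third iteration.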
532
532
533
533
534 def _timer(
534 def _timer(
535 fm,
535 fm,
536 func,
536 func,
537 setup=None,
537 setup=None,
538 title=None,
538 title=None,
539 displayall=False,
539 displayall=False,
540 limits=DEFAULTLIMITS,
540 limits=DEFAULTLIMITS,
541 prerun=0,
541 prerun=0,
542 profiler=None,
542 profiler=None,
543 ):
543 ):
544 gc.collect()
544 gc.collect()
545 results = []
545 results = []
546 begin = util.timer()
546 begin = util.timer()
547 count = 0
547 count = 0
548 if profiler is None:
548 if profiler is None:
549 profiler = NOOPCTX
549 profiler = NOOPCTX
550 for i in range(prerun):
550 for i in range(prerun):
551 if setup is not None:
551 if setup is not None:
552 setup()
552 setup()
553 func()
553 func()
554 keepgoing = True
554 keepgoing = True
555 while keepgoing:
555 while keepgoing:
556 if setup is not None:
556 if setup is not None:
557 setup()
557 setup()
558 with profiler:
558 with profiler:
559 with timeone() as item:
559 with timeone() as item:
560 r = func()
560 r = func()
561 profiler = NOOPCTX
561 profiler = NOOPCTX
562 count += 1
562 count += 1
563 results.append(item[0])
563 results.append(item[0])
564 cstop = util.timer()
564 cstop = util.timer()
565 # Look for a stop condition.
565 # Look for a stop condition.
566 elapsed = cstop - begin
566 elapsed = cstop - begin
567 for t, mincount in limits:
567 for t, mincount in limits:
568 if elapsed >= t and count >= mincount:
568 if elapsed >= t and count >= mincount:
569 keepgoing = False
569 keepgoing = False
570 break
570 break
571
571
572 formatone(fm, results, title=title, result=r, displayall=displayall)
572 formatone(fm, results, title=title, result=r, displayall=displayall)
573
573
574
574
575 def formatone(fm, timings, title=None, result=None, displayall=False):
575 def formatone(fm, timings, title=None, result=None, displayall=False):
576
576
577 count = len(timings)
577 count = len(timings)
578
578
579 fm.startitem()
579 fm.startitem()
580
580
581 if title:
581 if title:
582 fm.write(b'title', b'! %s\n', title)
582 fm.write(b'title', b'! %s\n', title)
583 if result:
583 if result:
584 fm.write(b'result', b'! result: %s\n', result)
584 fm.write(b'result', b'! result: %s\n', result)
585
585
586 def display(role, entry):
586 def display(role, entry):
587 prefix = b''
587 prefix = b''
588 if role != b'best':
588 if role != b'best':
589 prefix = b'%s.' % role
589 prefix = b'%s.' % role
590 fm.plain(b'!')
590 fm.plain(b'!')
591 fm.write(prefix + b'wall', b' wall %f', entry[0])
591 fm.write(prefix + b'wall', b' wall %f', entry[0])
592 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
592 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
593 fm.write(prefix + b'user', b' user %f', entry[1])
593 fm.write(prefix + b'user', b' user %f', entry[1])
594 fm.write(prefix + b'sys', b' sys %f', entry[2])
594 fm.write(prefix + b'sys', b' sys %f', entry[2])
595 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
595 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
596 fm.plain(b'\n')
596 fm.plain(b'\n')
597
597
598 timings.sort()
598 timings.sort()
599 min_val = timings[0]
599 min_val = timings[0]
600 display(b'best', min_val)
600 display(b'best', min_val)
601 if displayall:
601 if displayall:
602 max_val = timings[-1]
602 max_val = timings[-1]
603 display(b'max', max_val)
603 display(b'max', max_val)
604 avg = tuple([sum(x) / count for x in zip(*timings)])
604 avg = tuple([sum(x) / count for x in zip(*timings)])
605 display(b'avg', avg)
605 display(b'avg', avg)
606 median = timings[len(timings) // 2]
606 median = timings[len(timings) // 2]
607 display(b'median', median)
607 display(b'median', median)
608
608
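# For reference, the formatter above produces output such as (numbers are
# illustrative):
#
#   ! wall 0.001234 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)
#
# and, with perf.all-timing enabled, additional lines for the max, avg and
# median timings.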
609
609
610 # utilities for historical portability
610 # utilities for historical portability
611
611
612
612
613 def getint(ui, section, name, default):
613 def getint(ui, section, name, default):
614 # for "historical portability":
614 # for "historical portability":
615 # ui.configint has been available since 1.9 (or fa2b596db182)
615 # ui.configint has been available since 1.9 (or fa2b596db182)
616 v = ui.config(section, name, None)
616 v = ui.config(section, name, None)
617 if v is None:
617 if v is None:
618 return default
618 return default
619 try:
619 try:
620 return int(v)
620 return int(v)
621 except ValueError:
621 except ValueError:
622 raise error.ConfigError(
622 raise error.ConfigError(
623 b"%s.%s is not an integer ('%s')" % (section, name, v)
623 b"%s.%s is not an integer ('%s')" % (section, name, v)
624 )
624 )
625
625
626
626
627 def safeattrsetter(obj, name, ignoremissing=False):
627 def safeattrsetter(obj, name, ignoremissing=False):
628 """Ensure that 'obj' has 'name' attribute before subsequent setattr
628 """Ensure that 'obj' has 'name' attribute before subsequent setattr
629
629
630 This function is aborted, if 'obj' doesn't have 'name' attribute
630 This function is aborted, if 'obj' doesn't have 'name' attribute
631 at runtime. This avoids overlooking removal of an attribute, which
631 at runtime. This avoids overlooking removal of an attribute, which
632 breaks assumption of performance measurement, in the future.
632 breaks assumption of performance measurement, in the future.
633
633
634 This function returns the object to (1) assign a new value, and
634 This function returns the object to (1) assign a new value, and
635 (2) restore an original value to the attribute.
635 (2) restore an original value to the attribute.
636
636
637 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
637 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
638 abortion, and this function returns None. This is useful to
638 abortion, and this function returns None. This is useful to
639 examine an attribute, which isn't ensured in all Mercurial
639 examine an attribute, which isn't ensured in all Mercurial
640 versions.
640 versions.
641 """
641 """
642 if not util.safehasattr(obj, name):
642 if not util.safehasattr(obj, name):
643 if ignoremissing:
643 if ignoremissing:
644 return None
644 return None
645 raise error.Abort(
645 raise error.Abort(
646 (
646 (
647 b"missing attribute %s of %s might break assumption"
647 b"missing attribute %s of %s might break assumption"
648 b" of performance measurement"
648 b" of performance measurement"
649 )
649 )
650 % (name, obj)
650 % (name, obj)
651 )
651 )
652
652
653 origvalue = getattr(obj, _sysstr(name))
653 origvalue = getattr(obj, _sysstr(name))
654
654
655 class attrutil:
655 class attrutil:
656 def set(self, newvalue):
656 def set(self, newvalue):
657 setattr(obj, _sysstr(name), newvalue)
657 setattr(obj, _sysstr(name), newvalue)
658
658
659 def restore(self):
659 def restore(self):
660 setattr(obj, _sysstr(name), origvalue)
660 setattr(obj, _sysstr(name), origvalue)
661
661
662 return attrutil()
662 return attrutil()
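# Typical use of the helper above, mirroring gettimer() earlier in this file
# (a usage sketch, not additional functionality):
#
#   uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)   # later, uifout.restore() puts the old value back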
663
663
664
664
665 # utilities to examine each internal API changes
665 # utilities to examine each internal API changes
666
666
667
667
668 def getbranchmapsubsettable():
668 def getbranchmapsubsettable():
669 # for "historical portability":
669 # for "historical portability":
670 # subsettable is defined in:
670 # subsettable is defined in:
671 # - branchmap since 2.9 (or 175c6fd8cacc)
671 # - branchmap since 2.9 (or 175c6fd8cacc)
672 # - repoview since 2.5 (or 59a9f18d4587)
672 # - repoview since 2.5 (or 59a9f18d4587)
673 # - repoviewutil since 5.0
673 # - repoviewutil since 5.0
674 for mod in (branchmap, repoview, repoviewutil):
674 for mod in (branchmap, repoview, repoviewutil):
675 subsettable = getattr(mod, 'subsettable', None)
675 subsettable = getattr(mod, 'subsettable', None)
676 if subsettable:
676 if subsettable:
677 return subsettable
677 return subsettable
678
678
679 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
679 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
680 # branchmap and repoview modules exist, but subsettable attribute
680 # branchmap and repoview modules exist, but subsettable attribute
681 # doesn't)
681 # doesn't)
682 raise error.Abort(
682 raise error.Abort(
683 b"perfbranchmap not available with this Mercurial",
683 b"perfbranchmap not available with this Mercurial",
684 hint=b"use 2.5 or later",
684 hint=b"use 2.5 or later",
685 )
685 )
686
686
687
687
688 def getsvfs(repo):
688 def getsvfs(repo):
689 """Return appropriate object to access files under .hg/store"""
689 """Return appropriate object to access files under .hg/store"""
690 # for "historical portability":
690 # for "historical portability":
691 # repo.svfs has been available since 2.3 (or 7034365089bf)
691 # repo.svfs has been available since 2.3 (or 7034365089bf)
692 svfs = getattr(repo, 'svfs', None)
692 svfs = getattr(repo, 'svfs', None)
693 if svfs:
693 if svfs:
694 return svfs
694 return svfs
695 else:
695 else:
696 return getattr(repo, 'sopener')
696 return getattr(repo, 'sopener')
697
697
698
698
699 def getvfs(repo):
699 def getvfs(repo):
700 """Return appropriate object to access files under .hg"""
700 """Return appropriate object to access files under .hg"""
701 # for "historical portability":
701 # for "historical portability":
702 # repo.vfs has been available since 2.3 (or 7034365089bf)
702 # repo.vfs has been available since 2.3 (or 7034365089bf)
703 vfs = getattr(repo, 'vfs', None)
703 vfs = getattr(repo, 'vfs', None)
704 if vfs:
704 if vfs:
705 return vfs
705 return vfs
706 else:
706 else:
707 return getattr(repo, 'opener')
707 return getattr(repo, 'opener')
708
708
709
709
710 def repocleartagscachefunc(repo):
710 def repocleartagscachefunc(repo):
711 """Return the function to clear tags cache according to repo internal API"""
711 """Return the function to clear tags cache according to repo internal API"""
712 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
712 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
713 # in this case, setattr(repo, '_tagscache', None) or so isn't
713 # in this case, setattr(repo, '_tagscache', None) or so isn't
714 # correct way to clear tags cache, because existing code paths
714 # correct way to clear tags cache, because existing code paths
715 # expect _tagscache to be a structured object.
715 # expect _tagscache to be a structured object.
716 def clearcache():
716 def clearcache():
717 # _tagscache has been filteredpropertycache since 2.5 (or
717 # _tagscache has been filteredpropertycache since 2.5 (or
718 # 98c867ac1330), and delattr() can't work in such case
718 # 98c867ac1330), and delattr() can't work in such case
719 if '_tagscache' in vars(repo):
719 if '_tagscache' in vars(repo):
720 del repo.__dict__['_tagscache']
720 del repo.__dict__['_tagscache']
721
721
722 return clearcache
722 return clearcache
723
723
724 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
724 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
725 if repotags: # since 1.4 (or 5614a628d173)
725 if repotags: # since 1.4 (or 5614a628d173)
726 return lambda: repotags.set(None)
726 return lambda: repotags.set(None)
727
727
728 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
728 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
729 if repotagscache: # since 0.6 (or d7df759d0e97)
729 if repotagscache: # since 0.6 (or d7df759d0e97)
730 return lambda: repotagscache.set(None)
730 return lambda: repotagscache.set(None)
731
731
732 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
732 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
733 # this point, but it isn't so problematic, because:
733 # this point, but it isn't so problematic, because:
734 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
734 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
735 # in perftags() causes failure soon
735 # in perftags() causes failure soon
736 # - perf.py itself has been available since 1.1 (or eb240755386d)
736 # - perf.py itself has been available since 1.1 (or eb240755386d)
737 raise error.Abort(b"tags API of this hg command is unknown")
737 raise error.Abort(b"tags API of this hg command is unknown")
738
738
739
739
740 # utilities to clear cache
740 # utilities to clear cache
741
741
742
742
743 def clearfilecache(obj, attrname):
743 def clearfilecache(obj, attrname):
744 unfiltered = getattr(obj, 'unfiltered', None)
744 unfiltered = getattr(obj, 'unfiltered', None)
745 if unfiltered is not None:
745 if unfiltered is not None:
746 obj = obj.unfiltered()
746 obj = obj.unfiltered()
747 if attrname in vars(obj):
747 if attrname in vars(obj):
748 delattr(obj, attrname)
748 delattr(obj, attrname)
749 obj._filecache.pop(attrname, None)
749 obj._filecache.pop(attrname, None)
750
750
751
751
752 def clearchangelog(repo):
752 def clearchangelog(repo):
753 if repo is not repo.unfiltered():
753 if repo is not repo.unfiltered():
754 object.__setattr__(repo, '_clcachekey', None)
754 object.__setattr__(repo, '_clcachekey', None)
755 object.__setattr__(repo, '_clcache', None)
755 object.__setattr__(repo, '_clcache', None)
756 clearfilecache(repo.unfiltered(), 'changelog')
756 clearfilecache(repo.unfiltered(), 'changelog')
757
757
758
758
759 # perf commands
759 # perf commands
760
760
761
761
762 @command(b'perf::walk|perfwalk', formatteropts)
762 @command(b'perf::walk|perfwalk', formatteropts)
763 def perfwalk(ui, repo, *pats, **opts):
763 def perfwalk(ui, repo, *pats, **opts):
764 opts = _byteskwargs(opts)
764 opts = _byteskwargs(opts)
765 timer, fm = gettimer(ui, opts)
765 timer, fm = gettimer(ui, opts)
766 m = scmutil.match(repo[None], pats, {})
766 m = scmutil.match(repo[None], pats, {})
767 timer(
767 timer(
768 lambda: len(
768 lambda: len(
769 list(
769 list(
770 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
770 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
771 )
771 )
772 )
772 )
773 )
773 )
774 fm.end()
774 fm.end()
775
775
776
776
777 @command(b'perf::annotate|perfannotate', formatteropts)
777 @command(b'perf::annotate|perfannotate', formatteropts)
778 def perfannotate(ui, repo, f, **opts):
778 def perfannotate(ui, repo, f, **opts):
779 opts = _byteskwargs(opts)
779 opts = _byteskwargs(opts)
780 timer, fm = gettimer(ui, opts)
780 timer, fm = gettimer(ui, opts)
781 fc = repo[b'.'][f]
781 fc = repo[b'.'][f]
782 timer(lambda: len(fc.annotate(True)))
782 timer(lambda: len(fc.annotate(True)))
783 fm.end()
783 fm.end()
784
784
785
785
786 @command(
786 @command(
787 b'perf::status|perfstatus',
787 b'perf::status|perfstatus',
788 [
788 [
789 (b'u', b'unknown', False, b'ask status to look for unknown files'),
789 (b'u', b'unknown', False, b'ask status to look for unknown files'),
790 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
790 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
791 ]
791 ]
792 + formatteropts,
792 + formatteropts,
793 )
793 )
794 def perfstatus(ui, repo, **opts):
794 def perfstatus(ui, repo, **opts):
795 """benchmark the performance of a single status call
795 """benchmark the performance of a single status call
796
796
797 The repository data are preserved between each call.
797 The repository data are preserved between each call.
798
798
799 By default, only the status of the tracked files is requested. If
800 `--unknown` is passed, the "unknown" files are also considered.
801 """
801 """
802 opts = _byteskwargs(opts)
802 opts = _byteskwargs(opts)
803 # m = match.always(repo.root, repo.getcwd())
803 # m = match.always(repo.root, repo.getcwd())
804 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
804 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
805 # False))))
805 # False))))
806 timer, fm = gettimer(ui, opts)
806 timer, fm = gettimer(ui, opts)
807 if opts[b'dirstate']:
807 if opts[b'dirstate']:
808 dirstate = repo.dirstate
808 dirstate = repo.dirstate
809 m = scmutil.matchall(repo)
809 m = scmutil.matchall(repo)
810 unknown = opts[b'unknown']
810 unknown = opts[b'unknown']
811
811
812 def status_dirstate():
812 def status_dirstate():
813 s = dirstate.status(
813 s = dirstate.status(
814 m, subrepos=[], ignored=False, clean=False, unknown=unknown
814 m, subrepos=[], ignored=False, clean=False, unknown=unknown
815 )
815 )
816 sum(map(bool, s))
816 sum(map(bool, s))
817
817
818 timer(status_dirstate)
818 timer(status_dirstate)
819 else:
819 else:
820 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
820 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
821 fm.end()
821 fm.end()
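# Example invocations (with the perf extension enabled):
#
#   $ hg perf::status                # status of tracked files only
#   $ hg perf::status --unknown      # also look for unknown files
#   $ hg perf::status --dirstate     # benchmark the internal dirstate call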
822
822
823
823
824 @command(b'perf::addremove|perfaddremove', formatteropts)
824 @command(b'perf::addremove|perfaddremove', formatteropts)
825 def perfaddremove(ui, repo, **opts):
825 def perfaddremove(ui, repo, **opts):
826 opts = _byteskwargs(opts)
826 opts = _byteskwargs(opts)
827 timer, fm = gettimer(ui, opts)
827 timer, fm = gettimer(ui, opts)
828 try:
828 try:
829 oldquiet = repo.ui.quiet
829 oldquiet = repo.ui.quiet
830 repo.ui.quiet = True
830 repo.ui.quiet = True
831 matcher = scmutil.match(repo[None])
831 matcher = scmutil.match(repo[None])
832 opts[b'dry_run'] = True
832 opts[b'dry_run'] = True
833 if 'uipathfn' in getargspec(scmutil.addremove).args:
833 if 'uipathfn' in getargspec(scmutil.addremove).args:
834 uipathfn = scmutil.getuipathfn(repo)
834 uipathfn = scmutil.getuipathfn(repo)
835 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
835 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
836 else:
836 else:
837 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
837 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
838 finally:
838 finally:
839 repo.ui.quiet = oldquiet
839 repo.ui.quiet = oldquiet
840 fm.end()
840 fm.end()
841
841
842
842
843 def clearcaches(cl):
843 def clearcaches(cl):
844 # behave somewhat consistently across internal API changes
844 # behave somewhat consistently across internal API changes
845 if util.safehasattr(cl, b'clearcaches'):
845 if util.safehasattr(cl, b'clearcaches'):
846 cl.clearcaches()
846 cl.clearcaches()
847 elif util.safehasattr(cl, b'_nodecache'):
847 elif util.safehasattr(cl, b'_nodecache'):
848 # <= hg-5.2
848 # <= hg-5.2
849 from mercurial.node import nullid, nullrev
849 from mercurial.node import nullid, nullrev
850
850
851 cl._nodecache = {nullid: nullrev}
851 cl._nodecache = {nullid: nullrev}
852 cl._nodepos = None
852 cl._nodepos = None
853
853
854
854
855 @command(b'perf::heads|perfheads', formatteropts)
855 @command(b'perf::heads|perfheads', formatteropts)
856 def perfheads(ui, repo, **opts):
856 def perfheads(ui, repo, **opts):
857 """benchmark the computation of a changelog heads"""
857 """benchmark the computation of a changelog heads"""
858 opts = _byteskwargs(opts)
858 opts = _byteskwargs(opts)
859 timer, fm = gettimer(ui, opts)
859 timer, fm = gettimer(ui, opts)
860 cl = repo.changelog
860 cl = repo.changelog
861
861
862 def s():
862 def s():
863 clearcaches(cl)
863 clearcaches(cl)
864
864
865 def d():
865 def d():
866 len(cl.headrevs())
866 len(cl.headrevs())
867
867
868 timer(d, setup=s)
868 timer(d, setup=s)
869 fm.end()
869 fm.end()
870
870
871
871
872 @command(
872 @command(
873 b'perf::tags|perftags',
873 b'perf::tags|perftags',
874 formatteropts
874 formatteropts
875 + [
875 + [
876 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
876 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
877 ],
877 ],
878 )
878 )
879 def perftags(ui, repo, **opts):
879 def perftags(ui, repo, **opts):
880 opts = _byteskwargs(opts)
880 opts = _byteskwargs(opts)
881 timer, fm = gettimer(ui, opts)
881 timer, fm = gettimer(ui, opts)
882 repocleartagscache = repocleartagscachefunc(repo)
882 repocleartagscache = repocleartagscachefunc(repo)
883 clearrevlogs = opts[b'clear_revlogs']
883 clearrevlogs = opts[b'clear_revlogs']
884
884
885 def s():
885 def s():
886 if clearrevlogs:
886 if clearrevlogs:
887 clearchangelog(repo)
887 clearchangelog(repo)
888 clearfilecache(repo.unfiltered(), 'manifest')
888 clearfilecache(repo.unfiltered(), 'manifest')
889 repocleartagscache()
889 repocleartagscache()
890
890
891 def t():
891 def t():
892 return len(repo.tags())
892 return len(repo.tags())
893
893
894 timer(t, setup=s)
894 timer(t, setup=s)
895 fm.end()
895 fm.end()
896
896
897
897
898 @command(b'perf::ancestors|perfancestors', formatteropts)
898 @command(b'perf::ancestors|perfancestors', formatteropts)
899 def perfancestors(ui, repo, **opts):
899 def perfancestors(ui, repo, **opts):
900 opts = _byteskwargs(opts)
900 opts = _byteskwargs(opts)
901 timer, fm = gettimer(ui, opts)
901 timer, fm = gettimer(ui, opts)
902 heads = repo.changelog.headrevs()
902 heads = repo.changelog.headrevs()
903
903
904 def d():
904 def d():
905 for a in repo.changelog.ancestors(heads):
905 for a in repo.changelog.ancestors(heads):
906 pass
906 pass
907
907
908 timer(d)
908 timer(d)
909 fm.end()
909 fm.end()
910
910
911
911
912 @command(b'perf::ancestorset|perfancestorset', formatteropts)
912 @command(b'perf::ancestorset|perfancestorset', formatteropts)
913 def perfancestorset(ui, repo, revset, **opts):
913 def perfancestorset(ui, repo, revset, **opts):
914 opts = _byteskwargs(opts)
914 opts = _byteskwargs(opts)
915 timer, fm = gettimer(ui, opts)
915 timer, fm = gettimer(ui, opts)
916 revs = repo.revs(revset)
916 revs = repo.revs(revset)
917 heads = repo.changelog.headrevs()
917 heads = repo.changelog.headrevs()
918
918
919 def d():
919 def d():
920 s = repo.changelog.ancestors(heads)
920 s = repo.changelog.ancestors(heads)
921 for rev in revs:
921 for rev in revs:
922 rev in s
922 rev in s
923
923
924 timer(d)
924 timer(d)
925 fm.end()
925 fm.end()
926
926
927
927
928 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
928 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
929 def perfdiscovery(ui, repo, path, **opts):
929 def perfdiscovery(ui, repo, path, **opts):
930 """benchmark discovery between local repo and the peer at given path"""
930 """benchmark discovery between local repo and the peer at given path"""
931 repos = [repo, None]
931 repos = [repo, None]
932 timer, fm = gettimer(ui, opts)
932 timer, fm = gettimer(ui, opts)
933
933
934 try:
934 try:
935 from mercurial.utils.urlutil import get_unique_pull_path
935 from mercurial.utils.urlutil import get_unique_pull_path
936
936
937 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
937 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
938 except ImportError:
938 except ImportError:
939 path = ui.expandpath(path)
939 path = ui.expandpath(path)
940
940
941 def s():
941 def s():
942 repos[1] = hg.peer(ui, opts, path)
942 repos[1] = hg.peer(ui, opts, path)
943
943
944 def d():
944 def d():
945 setdiscovery.findcommonheads(ui, *repos)
945 setdiscovery.findcommonheads(ui, *repos)
946
946
947 timer(d, setup=s)
947 timer(d, setup=s)
948 fm.end()
948 fm.end()
949
949
950
950
951 @command(
951 @command(
952 b'perf::bookmarks|perfbookmarks',
952 b'perf::bookmarks|perfbookmarks',
953 formatteropts
953 formatteropts
954 + [
954 + [
955 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
955 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
956 ],
956 ],
957 )
957 )
958 def perfbookmarks(ui, repo, **opts):
958 def perfbookmarks(ui, repo, **opts):
959 """benchmark parsing bookmarks from disk to memory"""
959 """benchmark parsing bookmarks from disk to memory"""
960 opts = _byteskwargs(opts)
960 opts = _byteskwargs(opts)
961 timer, fm = gettimer(ui, opts)
961 timer, fm = gettimer(ui, opts)
962
962
963 clearrevlogs = opts[b'clear_revlogs']
963 clearrevlogs = opts[b'clear_revlogs']
964
964
965 def s():
965 def s():
966 if clearrevlogs:
966 if clearrevlogs:
967 clearchangelog(repo)
967 clearchangelog(repo)
968 clearfilecache(repo, b'_bookmarks')
968 clearfilecache(repo, b'_bookmarks')
969
969
970 def d():
970 def d():
971 repo._bookmarks
971 repo._bookmarks
972
972
973 timer(d, setup=s)
973 timer(d, setup=s)
974 fm.end()
974 fm.end()
975
975
976
976
977 @command(b'perf::bundle', formatteropts, b'REVS')
977 @command(
978 b'perf::bundle',
979 [
980 (
981 b'r',
982 b'rev',
983 [],
984 b'changesets to bundle',
985 b'REV',
986 ),
987 ]
988 + formatteropts,
989 b'REVS',
990 )
978 def perfbundle(ui, repo, *revs, **opts):
991 def perfbundle(ui, repo, *revs, **opts):
979 """benchmark the creation of a bundle from a repository
992 """benchmark the creation of a bundle from a repository
980
993
981 For now, this creates a `none-v1` bundle.
994 For now, this creates a `none-v1` bundle.
982 """
995 """
983 from mercurial import bundlecaches
996 from mercurial import bundlecaches
984 from mercurial import discovery
997 from mercurial import discovery
985 from mercurial import bundle2
998 from mercurial import bundle2
986
999
987 opts = _byteskwargs(opts)
1000 opts = _byteskwargs(opts)
988 timer, fm = gettimer(ui, opts)
1001 timer, fm = gettimer(ui, opts)
989
1002
990 cl = repo.changelog
1003 cl = repo.changelog
1004 revs = list(revs)
1005 revs.extend(opts.get(b'rev', ()))
991 revs = scmutil.revrange(repo, revs)
1006 revs = scmutil.revrange(repo, revs)
992 if not revs:
1007 if not revs:
993 raise error.Abort(b"no revision specified")
1008 raise error.Abort(b"no revision specified")
994 # make it a consistent set (ie: without topological gaps)
1009 # make it a consistent set (ie: without topological gaps)
995 old_len = len(revs)
1010 old_len = len(revs)
996 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1011 revs = list(repo.revs(b"%ld::%ld", revs, revs))
997 if old_len != len(revs):
1012 if old_len != len(revs):
998 new_count = len(revs) - old_len
1013 new_count = len(revs) - old_len
999 msg = b"add %d new revisions to make it a consistent set\n"
1014 msg = b"add %d new revisions to make it a consistent set\n"
1000 ui.write_err(msg % new_count)
1015 ui.write_err(msg % new_count)
1001
1016
1002 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1017 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1003 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1018 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1004 outgoing = discovery.outgoing(repo, bases, targets)
1019 outgoing = discovery.outgoing(repo, bases, targets)
1005
1020
1006 bundlespec = bundlecaches.parsebundlespec(
1021 bundlespec = bundlecaches.parsebundlespec(
1007 repo, b"none", strict=False
1022 repo, b"none", strict=False
1008 )
1023 )
1009
1024
1010 bversion = b'HG10' + bundlespec.wirecompression
1025 bversion = b'HG10' + bundlespec.wirecompression
1011
1026
1012 def do_bundle():
1027 def do_bundle():
1013 bundle2.writenewbundle(
1028 bundle2.writenewbundle(
1014 ui,
1029 ui,
1015 repo,
1030 repo,
1016 b'perf::bundle',
1031 b'perf::bundle',
1017 os.devnull,
1032 os.devnull,
1018 bversion,
1033 bversion,
1019 outgoing,
1034 outgoing,
1020 {},
1035 {},
1021 )
1036 )
1022
1037
1023 timer(do_bundle)
1038 timer(do_bundle)
1024 fm.end()
1039 fm.end()
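# Example of the consistent-set expansion performed above: if revisions 10 and
# 14 are requested, `%ld::%ld` also pulls in 11, 12 and 13 (assuming a linear
# history), so the bundled set has no topological gaps; the number of added
# revisions is reported on stderr.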
1025
1040
1026
1041
1027 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1042 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1028 def perfbundleread(ui, repo, bundlepath, **opts):
1043 def perfbundleread(ui, repo, bundlepath, **opts):
1029 """Benchmark reading of bundle files.
1044 """Benchmark reading of bundle files.
1030
1045
1031 This command is meant to isolate the I/O part of bundle reading as
1046 This command is meant to isolate the I/O part of bundle reading as
1032 much as possible.
1047 much as possible.
1033 """
1048 """
1034 from mercurial import (
1049 from mercurial import (
1035 bundle2,
1050 bundle2,
1036 exchange,
1051 exchange,
1037 streamclone,
1052 streamclone,
1038 )
1053 )
1039
1054
1040 opts = _byteskwargs(opts)
1055 opts = _byteskwargs(opts)
1041
1056
1042 def makebench(fn):
1057 def makebench(fn):
1043 def run():
1058 def run():
1044 with open(bundlepath, b'rb') as fh:
1059 with open(bundlepath, b'rb') as fh:
1045 bundle = exchange.readbundle(ui, fh, bundlepath)
1060 bundle = exchange.readbundle(ui, fh, bundlepath)
1046 fn(bundle)
1061 fn(bundle)
1047
1062
1048 return run
1063 return run
1049
1064
1050 def makereadnbytes(size):
1065 def makereadnbytes(size):
1051 def run():
1066 def run():
1052 with open(bundlepath, b'rb') as fh:
1067 with open(bundlepath, b'rb') as fh:
1053 bundle = exchange.readbundle(ui, fh, bundlepath)
1068 bundle = exchange.readbundle(ui, fh, bundlepath)
1054 while bundle.read(size):
1069 while bundle.read(size):
1055 pass
1070 pass
1056
1071
1057 return run
1072 return run
1058
1073
1059 def makestdioread(size):
1074 def makestdioread(size):
1060 def run():
1075 def run():
1061 with open(bundlepath, b'rb') as fh:
1076 with open(bundlepath, b'rb') as fh:
1062 while fh.read(size):
1077 while fh.read(size):
1063 pass
1078 pass
1064
1079
1065 return run
1080 return run
1066
1081
1067 # bundle1
1082 # bundle1
1068
1083
1069 def deltaiter(bundle):
1084 def deltaiter(bundle):
1070 for delta in bundle.deltaiter():
1085 for delta in bundle.deltaiter():
1071 pass
1086 pass
1072
1087
1073 def iterchunks(bundle):
1088 def iterchunks(bundle):
1074 for chunk in bundle.getchunks():
1089 for chunk in bundle.getchunks():
1075 pass
1090 pass
1076
1091
1077 # bundle2
1092 # bundle2
1078
1093
1079 def forwardchunks(bundle):
1094 def forwardchunks(bundle):
1080 for chunk in bundle._forwardchunks():
1095 for chunk in bundle._forwardchunks():
1081 pass
1096 pass
1082
1097
1083 def iterparts(bundle):
1098 def iterparts(bundle):
1084 for part in bundle.iterparts():
1099 for part in bundle.iterparts():
1085 pass
1100 pass
1086
1101
1087 def iterpartsseekable(bundle):
1102 def iterpartsseekable(bundle):
1088 for part in bundle.iterparts(seekable=True):
1103 for part in bundle.iterparts(seekable=True):
1089 pass
1104 pass
1090
1105
1091 def seek(bundle):
1106 def seek(bundle):
1092 for part in bundle.iterparts(seekable=True):
1107 for part in bundle.iterparts(seekable=True):
1093 part.seek(0, os.SEEK_END)
1108 part.seek(0, os.SEEK_END)
1094
1109
1095 def makepartreadnbytes(size):
1110 def makepartreadnbytes(size):
1096 def run():
1111 def run():
1097 with open(bundlepath, b'rb') as fh:
1112 with open(bundlepath, b'rb') as fh:
1098 bundle = exchange.readbundle(ui, fh, bundlepath)
1113 bundle = exchange.readbundle(ui, fh, bundlepath)
1099 for part in bundle.iterparts():
1114 for part in bundle.iterparts():
1100 while part.read(size):
1115 while part.read(size):
1101 pass
1116 pass
1102
1117
1103 return run
1118 return run
1104
1119
1105 benches = [
1120 benches = [
1106 (makestdioread(8192), b'read(8k)'),
1121 (makestdioread(8192), b'read(8k)'),
1107 (makestdioread(16384), b'read(16k)'),
1122 (makestdioread(16384), b'read(16k)'),
1108 (makestdioread(32768), b'read(32k)'),
1123 (makestdioread(32768), b'read(32k)'),
1109 (makestdioread(131072), b'read(128k)'),
1124 (makestdioread(131072), b'read(128k)'),
1110 ]
1125 ]
1111
1126
1112 with open(bundlepath, b'rb') as fh:
1127 with open(bundlepath, b'rb') as fh:
1113 bundle = exchange.readbundle(ui, fh, bundlepath)
1128 bundle = exchange.readbundle(ui, fh, bundlepath)
1114
1129
1115 if isinstance(bundle, changegroup.cg1unpacker):
1130 if isinstance(bundle, changegroup.cg1unpacker):
1116 benches.extend(
1131 benches.extend(
1117 [
1132 [
1118 (makebench(deltaiter), b'cg1 deltaiter()'),
1133 (makebench(deltaiter), b'cg1 deltaiter()'),
1119 (makebench(iterchunks), b'cg1 getchunks()'),
1134 (makebench(iterchunks), b'cg1 getchunks()'),
1120 (makereadnbytes(8192), b'cg1 read(8k)'),
1135 (makereadnbytes(8192), b'cg1 read(8k)'),
1121 (makereadnbytes(16384), b'cg1 read(16k)'),
1136 (makereadnbytes(16384), b'cg1 read(16k)'),
1122 (makereadnbytes(32768), b'cg1 read(32k)'),
1137 (makereadnbytes(32768), b'cg1 read(32k)'),
1123 (makereadnbytes(131072), b'cg1 read(128k)'),
1138 (makereadnbytes(131072), b'cg1 read(128k)'),
1124 ]
1139 ]
1125 )
1140 )
1126 elif isinstance(bundle, bundle2.unbundle20):
1141 elif isinstance(bundle, bundle2.unbundle20):
1127 benches.extend(
1142 benches.extend(
1128 [
1143 [
1129 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1144 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1130 (makebench(iterparts), b'bundle2 iterparts()'),
1145 (makebench(iterparts), b'bundle2 iterparts()'),
1131 (
1146 (
1132 makebench(iterpartsseekable),
1147 makebench(iterpartsseekable),
1133 b'bundle2 iterparts() seekable',
1148 b'bundle2 iterparts() seekable',
1134 ),
1149 ),
1135 (makebench(seek), b'bundle2 part seek()'),
1150 (makebench(seek), b'bundle2 part seek()'),
1136 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1151 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1137 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1152 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1138 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1153 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1139 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1154 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1140 ]
1155 ]
1141 )
1156 )
1142 elif isinstance(bundle, streamclone.streamcloneapplier):
1157 elif isinstance(bundle, streamclone.streamcloneapplier):
1143 raise error.Abort(b'stream clone bundles not supported')
1158 raise error.Abort(b'stream clone bundles not supported')
1144 else:
1159 else:
1145 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1160 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1146
1161
1147 for fn, title in benches:
1162 for fn, title in benches:
1148 timer, fm = gettimer(ui, opts)
1163 timer, fm = gettimer(ui, opts)
1149 timer(fn, title=title)
1164 timer(fn, title=title)
1150 fm.end()
1165 fm.end()
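# Usage note (editor's illustration): `perf::bundleread` needs an existing
# bundle file on disk. A minimal sequence, with the file name being an
# arbitrary choice:
#
#   $ hg bundle --all /tmp/all.hg
#   $ hg perf::bundleread /tmp/all.hg
#
# One timing block is emitted per entry in `benches`, so bundle1 and bundle2
# files produce different sets of measurements.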
1151
1166
1152
1167
1153 @command(
1168 @command(
1154 b'perf::changegroupchangelog|perfchangegroupchangelog',
1169 b'perf::changegroupchangelog|perfchangegroupchangelog',
1155 formatteropts
1170 formatteropts
1156 + [
1171 + [
1157 (b'', b'cgversion', b'02', b'changegroup version'),
1172 (b'', b'cgversion', b'02', b'changegroup version'),
1158 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1173 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1159 ],
1174 ],
1160 )
1175 )
1161 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1176 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1162 """Benchmark producing a changelog group for a changegroup.
1177 """Benchmark producing a changelog group for a changegroup.
1163
1178
1164 This measures the time spent processing the changelog during a
1179 This measures the time spent processing the changelog during a
1165 bundle operation. This occurs during `hg bundle` and on a server
1180 bundle operation. This occurs during `hg bundle` and on a server
1166 processing a `getbundle` wire protocol request (handles clones
1181 processing a `getbundle` wire protocol request (handles clones
1167 and pull requests).
1182 and pull requests).
1168
1183
1169 By default, all revisions are added to the changegroup.
1184 By default, all revisions are added to the changegroup.
1170 """
1185 """
1171 opts = _byteskwargs(opts)
1186 opts = _byteskwargs(opts)
1172 cl = repo.changelog
1187 cl = repo.changelog
1173 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1188 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1174 bundler = changegroup.getbundler(cgversion, repo)
1189 bundler = changegroup.getbundler(cgversion, repo)
1175
1190
1176 def d():
1191 def d():
1177 state, chunks = bundler._generatechangelog(cl, nodes)
1192 state, chunks = bundler._generatechangelog(cl, nodes)
1178 for chunk in chunks:
1193 for chunk in chunks:
1179 pass
1194 pass
1180
1195
1181 timer, fm = gettimer(ui, opts)
1196 timer, fm = gettimer(ui, opts)
1182
1197
1183 # Terminal printing can interfere with timing. So disable it.
1198 # Terminal printing can interfere with timing. So disable it.
1184 with ui.configoverride({(b'progress', b'disable'): True}):
1199 with ui.configoverride({(b'progress', b'disable'): True}):
1185 timer(d)
1200 timer(d)
1186
1201
1187 fm.end()
1202 fm.end()
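# Usage note (editor's illustration): the measurement can be restricted to a
# recent slice of history instead of the default `all()`. The revset and the
# changegroup version below are placeholders:
#
#   $ hg perf::changegroupchangelog --rev '-1000:' --cgversion 02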
1188
1203
1189
1204
1190 @command(b'perf::dirs|perfdirs', formatteropts)
1205 @command(b'perf::dirs|perfdirs', formatteropts)
1191 def perfdirs(ui, repo, **opts):
1206 def perfdirs(ui, repo, **opts):
1192 opts = _byteskwargs(opts)
1207 opts = _byteskwargs(opts)
1193 timer, fm = gettimer(ui, opts)
1208 timer, fm = gettimer(ui, opts)
1194 dirstate = repo.dirstate
1209 dirstate = repo.dirstate
1195 b'a' in dirstate
1210 b'a' in dirstate
1196
1211
1197 def d():
1212 def d():
1198 dirstate.hasdir(b'a')
1213 dirstate.hasdir(b'a')
1199 try:
1214 try:
1200 del dirstate._map._dirs
1215 del dirstate._map._dirs
1201 except AttributeError:
1216 except AttributeError:
1202 pass
1217 pass
1203
1218
1204 timer(d)
1219 timer(d)
1205 fm.end()
1220 fm.end()
1206
1221
1207
1222
1208 @command(
1223 @command(
1209 b'perf::dirstate|perfdirstate',
1224 b'perf::dirstate|perfdirstate',
1210 [
1225 [
1211 (
1226 (
1212 b'',
1227 b'',
1213 b'iteration',
1228 b'iteration',
1214 None,
1229 None,
1215 b'benchmark a full iteration for the dirstate',
1230 b'benchmark a full iteration for the dirstate',
1216 ),
1231 ),
1217 (
1232 (
1218 b'',
1233 b'',
1219 b'contains',
1234 b'contains',
1220 None,
1235 None,
1221 b'benchmark a large amount of `nf in dirstate` calls',
1236 b'benchmark a large amount of `nf in dirstate` calls',
1222 ),
1237 ),
1223 ]
1238 ]
1224 + formatteropts,
1239 + formatteropts,
1225 )
1240 )
1226 def perfdirstate(ui, repo, **opts):
1241 def perfdirstate(ui, repo, **opts):
1227 """benchmark the time of various dirstate operations
1242 """benchmark the time of various dirstate operations
1228
1243
1229 By default benchmark the time necessary to load a dirstate from scratch.
1244 By default benchmark the time necessary to load a dirstate from scratch.
1230 The dirstate is loaded to the point where a "contains" request can be
1245 The dirstate is loaded to the point where a "contains" request can be
1231 answered.
1246 answered.
1232 """
1247 """
1233 opts = _byteskwargs(opts)
1248 opts = _byteskwargs(opts)
1234 timer, fm = gettimer(ui, opts)
1249 timer, fm = gettimer(ui, opts)
1235 b"a" in repo.dirstate
1250 b"a" in repo.dirstate
1236
1251
1237 if opts[b'iteration'] and opts[b'contains']:
1252 if opts[b'iteration'] and opts[b'contains']:
1238 msg = b'only specify one of --iteration or --contains'
1253 msg = b'only specify one of --iteration or --contains'
1239 raise error.Abort(msg)
1254 raise error.Abort(msg)
1240
1255
1241 if opts[b'iteration']:
1256 if opts[b'iteration']:
1242 setup = None
1257 setup = None
1243 dirstate = repo.dirstate
1258 dirstate = repo.dirstate
1244
1259
1245 def d():
1260 def d():
1246 for f in dirstate:
1261 for f in dirstate:
1247 pass
1262 pass
1248
1263
1249 elif opts[b'contains']:
1264 elif opts[b'contains']:
1250 setup = None
1265 setup = None
1251 dirstate = repo.dirstate
1266 dirstate = repo.dirstate
1252 allfiles = list(dirstate)
1267 allfiles = list(dirstate)
1253 # also add file paths that will be "missing" from the dirstate
1268 # also add file paths that will be "missing" from the dirstate
1254 allfiles.extend([f[::-1] for f in allfiles])
1269 allfiles.extend([f[::-1] for f in allfiles])
1255
1270
1256 def d():
1271 def d():
1257 for f in allfiles:
1272 for f in allfiles:
1258 f in dirstate
1273 f in dirstate
1259
1274
1260 else:
1275 else:
1261
1276
1262 def setup():
1277 def setup():
1263 repo.dirstate.invalidate()
1278 repo.dirstate.invalidate()
1264
1279
1265 def d():
1280 def d():
1266 b"a" in repo.dirstate
1281 b"a" in repo.dirstate
1267
1282
1268 timer(d, setup=setup)
1283 timer(d, setup=setup)
1269 fm.end()
1284 fm.end()
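# Usage note (editor's illustration): the three modes of `perf::dirstate`
# exercise different code paths and are mutually exclusive:
#
#   $ hg perf::dirstate              # load from scratch until `in` works
#   $ hg perf::dirstate --iteration  # full iteration over tracked files
#   $ hg perf::dirstate --contains   # many `f in dirstate` lookups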
1270
1285
1271
1286
1272 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1287 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1273 def perfdirstatedirs(ui, repo, **opts):
1288 def perfdirstatedirs(ui, repo, **opts):
1274 """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
1289 """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
1275 opts = _byteskwargs(opts)
1290 opts = _byteskwargs(opts)
1276 timer, fm = gettimer(ui, opts)
1291 timer, fm = gettimer(ui, opts)
1277 repo.dirstate.hasdir(b"a")
1292 repo.dirstate.hasdir(b"a")
1278
1293
1279 def setup():
1294 def setup():
1280 try:
1295 try:
1281 del repo.dirstate._map._dirs
1296 del repo.dirstate._map._dirs
1282 except AttributeError:
1297 except AttributeError:
1283 pass
1298 pass
1284
1299
1285 def d():
1300 def d():
1286 repo.dirstate.hasdir(b"a")
1301 repo.dirstate.hasdir(b"a")
1287
1302
1288 timer(d, setup=setup)
1303 timer(d, setup=setup)
1289 fm.end()
1304 fm.end()
1290
1305
1291
1306
1292 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1307 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1293 def perfdirstatefoldmap(ui, repo, **opts):
1308 def perfdirstatefoldmap(ui, repo, **opts):
1294 """benchmark a `dirstate._map.filefoldmap.get()` request
1309 """benchmark a `dirstate._map.filefoldmap.get()` request
1295
1310
1296 The dirstate filefoldmap cache is dropped between every request.
1311 The dirstate filefoldmap cache is dropped between every request.
1297 """
1312 """
1298 opts = _byteskwargs(opts)
1313 opts = _byteskwargs(opts)
1299 timer, fm = gettimer(ui, opts)
1314 timer, fm = gettimer(ui, opts)
1300 dirstate = repo.dirstate
1315 dirstate = repo.dirstate
1301 dirstate._map.filefoldmap.get(b'a')
1316 dirstate._map.filefoldmap.get(b'a')
1302
1317
1303 def setup():
1318 def setup():
1304 del dirstate._map.filefoldmap
1319 del dirstate._map.filefoldmap
1305
1320
1306 def d():
1321 def d():
1307 dirstate._map.filefoldmap.get(b'a')
1322 dirstate._map.filefoldmap.get(b'a')
1308
1323
1309 timer(d, setup=setup)
1324 timer(d, setup=setup)
1310 fm.end()
1325 fm.end()
1311
1326
1312
1327
1313 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1328 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1314 def perfdirfoldmap(ui, repo, **opts):
1329 def perfdirfoldmap(ui, repo, **opts):
1315 """benchmark a `dirstate._map.dirfoldmap.get()` request
1330 """benchmark a `dirstate._map.dirfoldmap.get()` request
1316
1331
1317 The dirstate dirfoldmap cache is dropped between every request.
1332 The dirstate dirfoldmap cache is dropped between every request.
1318 """
1333 """
1319 opts = _byteskwargs(opts)
1334 opts = _byteskwargs(opts)
1320 timer, fm = gettimer(ui, opts)
1335 timer, fm = gettimer(ui, opts)
1321 dirstate = repo.dirstate
1336 dirstate = repo.dirstate
1322 dirstate._map.dirfoldmap.get(b'a')
1337 dirstate._map.dirfoldmap.get(b'a')
1323
1338
1324 def setup():
1339 def setup():
1325 del dirstate._map.dirfoldmap
1340 del dirstate._map.dirfoldmap
1326 try:
1341 try:
1327 del dirstate._map._dirs
1342 del dirstate._map._dirs
1328 except AttributeError:
1343 except AttributeError:
1329 pass
1344 pass
1330
1345
1331 def d():
1346 def d():
1332 dirstate._map.dirfoldmap.get(b'a')
1347 dirstate._map.dirfoldmap.get(b'a')
1333
1348
1334 timer(d, setup=setup)
1349 timer(d, setup=setup)
1335 fm.end()
1350 fm.end()
1336
1351
1337
1352
1338 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1353 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1339 def perfdirstatewrite(ui, repo, **opts):
1354 def perfdirstatewrite(ui, repo, **opts):
1340 """benchmark the time it takes to write a dirstate on disk"""
1355 """benchmark the time it takes to write a dirstate on disk"""
1341 opts = _byteskwargs(opts)
1356 opts = _byteskwargs(opts)
1342 timer, fm = gettimer(ui, opts)
1357 timer, fm = gettimer(ui, opts)
1343 ds = repo.dirstate
1358 ds = repo.dirstate
1344 b"a" in ds
1359 b"a" in ds
1345
1360
1346 def setup():
1361 def setup():
1347 ds._dirty = True
1362 ds._dirty = True
1348
1363
1349 def d():
1364 def d():
1350 ds.write(repo.currenttransaction())
1365 ds.write(repo.currenttransaction())
1351
1366
1352 timer(d, setup=setup)
1367 timer(d, setup=setup)
1353 fm.end()
1368 fm.end()
1354
1369
1355
1370
1356 def _getmergerevs(repo, opts):
1371 def _getmergerevs(repo, opts):
1357 """parse command arguments to return the revs involved in a merge
1372 """parse command arguments to return the revs involved in a merge
1358
1373
1359 input: options dictionary with `rev`, `from` and `base`
1374 input: options dictionary with `rev`, `from` and `base`
1360 output: (localctx, otherctx, basectx)
1375 output: (localctx, otherctx, basectx)
1361 """
1376 """
1362 if opts[b'from']:
1377 if opts[b'from']:
1363 fromrev = scmutil.revsingle(repo, opts[b'from'])
1378 fromrev = scmutil.revsingle(repo, opts[b'from'])
1364 wctx = repo[fromrev]
1379 wctx = repo[fromrev]
1365 else:
1380 else:
1366 wctx = repo[None]
1381 wctx = repo[None]
1367 # we don't want working dir files to be stat'd in the benchmark, so
1382 # we don't want working dir files to be stat'd in the benchmark, so
1368 # prime that cache
1383 # prime that cache
1369 wctx.dirty()
1384 wctx.dirty()
1370 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1385 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1371 if opts[b'base']:
1386 if opts[b'base']:
1372 fromrev = scmutil.revsingle(repo, opts[b'base'])
1387 fromrev = scmutil.revsingle(repo, opts[b'base'])
1373 ancestor = repo[fromrev]
1388 ancestor = repo[fromrev]
1374 else:
1389 else:
1375 ancestor = wctx.ancestor(rctx)
1390 ancestor = wctx.ancestor(rctx)
1376 return (wctx, rctx, ancestor)
1391 return (wctx, rctx, ancestor)
1377
1392
1378
1393
1379 @command(
1394 @command(
1380 b'perf::mergecalculate|perfmergecalculate',
1395 b'perf::mergecalculate|perfmergecalculate',
1381 [
1396 [
1382 (b'r', b'rev', b'.', b'rev to merge against'),
1397 (b'r', b'rev', b'.', b'rev to merge against'),
1383 (b'', b'from', b'', b'rev to merge from'),
1398 (b'', b'from', b'', b'rev to merge from'),
1384 (b'', b'base', b'', b'the revision to use as base'),
1399 (b'', b'base', b'', b'the revision to use as base'),
1385 ]
1400 ]
1386 + formatteropts,
1401 + formatteropts,
1387 )
1402 )
1388 def perfmergecalculate(ui, repo, **opts):
1403 def perfmergecalculate(ui, repo, **opts):
1389 opts = _byteskwargs(opts)
1404 opts = _byteskwargs(opts)
1390 timer, fm = gettimer(ui, opts)
1405 timer, fm = gettimer(ui, opts)
1391
1406
1392 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1407 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1393
1408
1394 def d():
1409 def d():
1395 # acceptremote is True because we don't want prompts in the middle of
1410 # acceptremote is True because we don't want prompts in the middle of
1396 # our benchmark
1411 # our benchmark
1397 merge.calculateupdates(
1412 merge.calculateupdates(
1398 repo,
1413 repo,
1399 wctx,
1414 wctx,
1400 rctx,
1415 rctx,
1401 [ancestor],
1416 [ancestor],
1402 branchmerge=False,
1417 branchmerge=False,
1403 force=False,
1418 force=False,
1404 acceptremote=True,
1419 acceptremote=True,
1405 followcopies=True,
1420 followcopies=True,
1406 )
1421 )
1407
1422
1408 timer(d)
1423 timer(d)
1409 fm.end()
1424 fm.end()
1410
1425
1411
1426
1412 @command(
1427 @command(
1413 b'perf::mergecopies|perfmergecopies',
1428 b'perf::mergecopies|perfmergecopies',
1414 [
1429 [
1415 (b'r', b'rev', b'.', b'rev to merge against'),
1430 (b'r', b'rev', b'.', b'rev to merge against'),
1416 (b'', b'from', b'', b'rev to merge from'),
1431 (b'', b'from', b'', b'rev to merge from'),
1417 (b'', b'base', b'', b'the revision to use as base'),
1432 (b'', b'base', b'', b'the revision to use as base'),
1418 ]
1433 ]
1419 + formatteropts,
1434 + formatteropts,
1420 )
1435 )
1421 def perfmergecopies(ui, repo, **opts):
1436 def perfmergecopies(ui, repo, **opts):
1422 """measure runtime of `copies.mergecopies`"""
1437 """measure runtime of `copies.mergecopies`"""
1423 opts = _byteskwargs(opts)
1438 opts = _byteskwargs(opts)
1424 timer, fm = gettimer(ui, opts)
1439 timer, fm = gettimer(ui, opts)
1425 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1440 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1426
1441
1427 def d():
1442 def d():
1428 # acceptremote is True because we don't want prompts in the middle of
1443 # acceptremote is True because we don't want prompts in the middle of
1429 # our benchmark
1444 # our benchmark
1430 copies.mergecopies(repo, wctx, rctx, ancestor)
1445 copies.mergecopies(repo, wctx, rctx, ancestor)
1431
1446
1432 timer(d)
1447 timer(d)
1433 fm.end()
1448 fm.end()
1434
1449
1435
1450
1436 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1451 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1437 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1452 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1438 """benchmark the copy tracing logic"""
1453 """benchmark the copy tracing logic"""
1439 opts = _byteskwargs(opts)
1454 opts = _byteskwargs(opts)
1440 timer, fm = gettimer(ui, opts)
1455 timer, fm = gettimer(ui, opts)
1441 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1456 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1442 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1457 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1443
1458
1444 def d():
1459 def d():
1445 copies.pathcopies(ctx1, ctx2)
1460 copies.pathcopies(ctx1, ctx2)
1446
1461
1447 timer(d)
1462 timer(d)
1448 fm.end()
1463 fm.end()
1449
1464
1450
1465
1451 @command(
1466 @command(
1452 b'perf::phases|perfphases',
1467 b'perf::phases|perfphases',
1453 [
1468 [
1454 (b'', b'full', False, b'include file reading time too'),
1469 (b'', b'full', False, b'include file reading time too'),
1455 ],
1470 ],
1456 b"",
1471 b"",
1457 )
1472 )
1458 def perfphases(ui, repo, **opts):
1473 def perfphases(ui, repo, **opts):
1459 """benchmark phasesets computation"""
1474 """benchmark phasesets computation"""
1460 opts = _byteskwargs(opts)
1475 opts = _byteskwargs(opts)
1461 timer, fm = gettimer(ui, opts)
1476 timer, fm = gettimer(ui, opts)
1462 _phases = repo._phasecache
1477 _phases = repo._phasecache
1463 full = opts.get(b'full')
1478 full = opts.get(b'full')
1464
1479
1465 def d():
1480 def d():
1466 phases = _phases
1481 phases = _phases
1467 if full:
1482 if full:
1468 clearfilecache(repo, b'_phasecache')
1483 clearfilecache(repo, b'_phasecache')
1469 phases = repo._phasecache
1484 phases = repo._phasecache
1470 phases.invalidate()
1485 phases.invalidate()
1471 phases.loadphaserevs(repo)
1486 phases.loadphaserevs(repo)
1472
1487
1473 timer(d)
1488 timer(d)
1474 fm.end()
1489 fm.end()
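# Usage note (editor's illustration): by default only the in-memory phaseset
# computation is timed; --full also drops the filecache entry so the on-disk
# phase data is re-read on every run.
#
#   $ hg perf::phases
#   $ hg perf::phases --full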
1475
1490
1476
1491
1477 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1492 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1478 def perfphasesremote(ui, repo, dest=None, **opts):
1493 def perfphasesremote(ui, repo, dest=None, **opts):
1479 """benchmark time needed to analyse phases of the remote server"""
1494 """benchmark time needed to analyse phases of the remote server"""
1480 from mercurial.node import bin
1495 from mercurial.node import bin
1481 from mercurial import (
1496 from mercurial import (
1482 exchange,
1497 exchange,
1483 hg,
1498 hg,
1484 phases,
1499 phases,
1485 )
1500 )
1486
1501
1487 opts = _byteskwargs(opts)
1502 opts = _byteskwargs(opts)
1488 timer, fm = gettimer(ui, opts)
1503 timer, fm = gettimer(ui, opts)
1489
1504
1490 path = ui.getpath(dest, default=(b'default-push', b'default'))
1505 path = ui.getpath(dest, default=(b'default-push', b'default'))
1491 if not path:
1506 if not path:
1492 raise error.Abort(
1507 raise error.Abort(
1493 b'default repository not configured!',
1508 b'default repository not configured!',
1494 hint=b"see 'hg help config.paths'",
1509 hint=b"see 'hg help config.paths'",
1495 )
1510 )
1496 dest = path.pushloc or path.loc
1511 dest = path.pushloc or path.loc
1497 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1512 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1498 other = hg.peer(repo, opts, dest)
1513 other = hg.peer(repo, opts, dest)
1499
1514
1500 # easier to perform discovery through the operation
1515 # easier to perform discovery through the operation
1501 op = exchange.pushoperation(repo, other)
1516 op = exchange.pushoperation(repo, other)
1502 exchange._pushdiscoverychangeset(op)
1517 exchange._pushdiscoverychangeset(op)
1503
1518
1504 remotesubset = op.fallbackheads
1519 remotesubset = op.fallbackheads
1505
1520
1506 with other.commandexecutor() as e:
1521 with other.commandexecutor() as e:
1507 remotephases = e.callcommand(
1522 remotephases = e.callcommand(
1508 b'listkeys', {b'namespace': b'phases'}
1523 b'listkeys', {b'namespace': b'phases'}
1509 ).result()
1524 ).result()
1510 del other
1525 del other
1511 publishing = remotephases.get(b'publishing', False)
1526 publishing = remotephases.get(b'publishing', False)
1512 if publishing:
1527 if publishing:
1513 ui.statusnoi18n(b'publishing: yes\n')
1528 ui.statusnoi18n(b'publishing: yes\n')
1514 else:
1529 else:
1515 ui.statusnoi18n(b'publishing: no\n')
1530 ui.statusnoi18n(b'publishing: no\n')
1516
1531
1517 has_node = getattr(repo.changelog.index, 'has_node', None)
1532 has_node = getattr(repo.changelog.index, 'has_node', None)
1518 if has_node is None:
1533 if has_node is None:
1519 has_node = repo.changelog.nodemap.__contains__
1534 has_node = repo.changelog.nodemap.__contains__
1520 nonpublishroots = 0
1535 nonpublishroots = 0
1521 for nhex, phase in remotephases.iteritems():
1536 for nhex, phase in remotephases.iteritems():
1522 if nhex == b'publishing': # ignore data related to publish option
1537 if nhex == b'publishing': # ignore data related to publish option
1523 continue
1538 continue
1524 node = bin(nhex)
1539 node = bin(nhex)
1525 if has_node(node) and int(phase):
1540 if has_node(node) and int(phase):
1526 nonpublishroots += 1
1541 nonpublishroots += 1
1527 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1542 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1528 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1543 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1529
1544
1530 def d():
1545 def d():
1531 phases.remotephasessummary(repo, remotesubset, remotephases)
1546 phases.remotephasessummary(repo, remotesubset, remotephases)
1532
1547
1533 timer(d)
1548 timer(d)
1534 fm.end()
1549 fm.end()
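# Usage note (editor's illustration): without an argument the
# `default-push`/`default` path from the configuration is used; PATH below is
# a placeholder for any other configured or explicit peer location.
#
#   $ hg perf::phasesremote
#   $ hg perf::phasesremote PATH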
1535
1550
1536
1551
1537 @command(
1552 @command(
1538 b'perf::manifest|perfmanifest',
1553 b'perf::manifest|perfmanifest',
1539 [
1554 [
1540 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1555 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1541 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1556 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1542 ]
1557 ]
1543 + formatteropts,
1558 + formatteropts,
1544 b'REV|NODE',
1559 b'REV|NODE',
1545 )
1560 )
1546 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1561 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1547 """benchmark the time to read a manifest from disk and return a usable
1562 """benchmark the time to read a manifest from disk and return a usable
1548 dict-like object
1563 dict-like object
1549
1564
1550 Manifest caches are cleared before retrieval."""
1565 Manifest caches are cleared before retrieval."""
1551 opts = _byteskwargs(opts)
1566 opts = _byteskwargs(opts)
1552 timer, fm = gettimer(ui, opts)
1567 timer, fm = gettimer(ui, opts)
1553 if not manifest_rev:
1568 if not manifest_rev:
1554 ctx = scmutil.revsingle(repo, rev, rev)
1569 ctx = scmutil.revsingle(repo, rev, rev)
1555 t = ctx.manifestnode()
1570 t = ctx.manifestnode()
1556 else:
1571 else:
1557 from mercurial.node import bin
1572 from mercurial.node import bin
1558
1573
1559 if len(rev) == 40:
1574 if len(rev) == 40:
1560 t = bin(rev)
1575 t = bin(rev)
1561 else:
1576 else:
1562 try:
1577 try:
1563 rev = int(rev)
1578 rev = int(rev)
1564
1579
1565 if util.safehasattr(repo.manifestlog, b'getstorage'):
1580 if util.safehasattr(repo.manifestlog, b'getstorage'):
1566 t = repo.manifestlog.getstorage(b'').node(rev)
1581 t = repo.manifestlog.getstorage(b'').node(rev)
1567 else:
1582 else:
1568 t = repo.manifestlog._revlog.lookup(rev)
1583 t = repo.manifestlog._revlog.lookup(rev)
1569 except ValueError:
1584 except ValueError:
1570 raise error.Abort(
1585 raise error.Abort(
1571 b'manifest revision must be integer or full node'
1586 b'manifest revision must be integer or full node'
1572 )
1587 )
1573
1588
1574 def d():
1589 def d():
1575 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1590 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1576 repo.manifestlog[t].read()
1591 repo.manifestlog[t].read()
1577
1592
1578 timer(d)
1593 timer(d)
1579 fm.end()
1594 fm.end()
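# Usage note (editor's illustration): the argument is normally a changeset
# revision; with -m it is interpreted as a manifest revision or a full
# manifest node instead, and --clear-disk also drops persisted manifest
# caches between runs.
#
#   $ hg perf::manifest tip
#   $ hg perf::manifest --clear-disk tip
#   $ hg perf::manifest -m 0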
1580
1595
1581
1596
1582 @command(b'perf::changeset|perfchangeset', formatteropts)
1597 @command(b'perf::changeset|perfchangeset', formatteropts)
1583 def perfchangeset(ui, repo, rev, **opts):
1598 def perfchangeset(ui, repo, rev, **opts):
1584 opts = _byteskwargs(opts)
1599 opts = _byteskwargs(opts)
1585 timer, fm = gettimer(ui, opts)
1600 timer, fm = gettimer(ui, opts)
1586 n = scmutil.revsingle(repo, rev).node()
1601 n = scmutil.revsingle(repo, rev).node()
1587
1602
1588 def d():
1603 def d():
1589 repo.changelog.read(n)
1604 repo.changelog.read(n)
1590 # repo.changelog._cache = None
1605 # repo.changelog._cache = None
1591
1606
1592 timer(d)
1607 timer(d)
1593 fm.end()
1608 fm.end()
1594
1609
1595
1610
1596 @command(b'perf::ignore|perfignore', formatteropts)
1611 @command(b'perf::ignore|perfignore', formatteropts)
1597 def perfignore(ui, repo, **opts):
1612 def perfignore(ui, repo, **opts):
1598 """benchmark operations related to computing ignore"""
1613 """benchmark operations related to computing ignore"""
1599 opts = _byteskwargs(opts)
1614 opts = _byteskwargs(opts)
1600 timer, fm = gettimer(ui, opts)
1615 timer, fm = gettimer(ui, opts)
1601 dirstate = repo.dirstate
1616 dirstate = repo.dirstate
1602
1617
1603 def setupone():
1618 def setupone():
1604 dirstate.invalidate()
1619 dirstate.invalidate()
1605 clearfilecache(dirstate, b'_ignore')
1620 clearfilecache(dirstate, b'_ignore')
1606
1621
1607 def runone():
1622 def runone():
1608 dirstate._ignore
1623 dirstate._ignore
1609
1624
1610 timer(runone, setup=setupone, title=b"load")
1625 timer(runone, setup=setupone, title=b"load")
1611 fm.end()
1626 fm.end()
1612
1627
1613
1628
1614 @command(
1629 @command(
1615 b'perf::index|perfindex',
1630 b'perf::index|perfindex',
1616 [
1631 [
1617 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1632 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1618 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1633 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1619 ]
1634 ]
1620 + formatteropts,
1635 + formatteropts,
1621 )
1636 )
1622 def perfindex(ui, repo, **opts):
1637 def perfindex(ui, repo, **opts):
1623 """benchmark index creation time followed by a lookup
1638 """benchmark index creation time followed by a lookup
1624
1639
1625 The default is to look `tip` up. Depending on the index implementation,
1640 The default is to look `tip` up. Depending on the index implementation,
1626 the revision looked up can matter. For example, an implementation
1641 the revision looked up can matter. For example, an implementation
1627 scanning the index will have a faster lookup time for `--rev tip` than for
1642 scanning the index will have a faster lookup time for `--rev tip` than for
1628 `--rev 0`. The number of looked up revisions and their order can also
1643 `--rev 0`. The number of looked up revisions and their order can also
1629 matter.
1644 matter.
1630
1645
1631 Examples of useful sets to test:
1646 Examples of useful sets to test:
1632
1647
1633 * tip
1648 * tip
1634 * 0
1649 * 0
1635 * -10:
1650 * -10:
1636 * :10
1651 * :10
1637 * -10: + :10
1652 * -10: + :10
1638 * :10: + -10:
1653 * :10: + -10:
1639 * -10000:
1654 * -10000:
1640 * -10000: + 0
1655 * -10000: + 0
1641
1656
1642 It is not currently possible to check for lookup of a missing node. For
1657 It is not currently possible to check for lookup of a missing node. For
1643 deeper lookup benchmarking, check out the `perfnodemap` command."""
1658 deeper lookup benchmarking, check out the `perfnodemap` command."""
1644 import mercurial.revlog
1659 import mercurial.revlog
1645
1660
1646 opts = _byteskwargs(opts)
1661 opts = _byteskwargs(opts)
1647 timer, fm = gettimer(ui, opts)
1662 timer, fm = gettimer(ui, opts)
1648 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1663 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1649 if opts[b'no_lookup']:
1664 if opts[b'no_lookup']:
1650 if opts['rev']:
1665 if opts['rev']:
1651 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1666 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1652 nodes = []
1667 nodes = []
1653 elif not opts[b'rev']:
1668 elif not opts[b'rev']:
1654 nodes = [repo[b"tip"].node()]
1669 nodes = [repo[b"tip"].node()]
1655 else:
1670 else:
1656 revs = scmutil.revrange(repo, opts[b'rev'])
1671 revs = scmutil.revrange(repo, opts[b'rev'])
1657 cl = repo.changelog
1672 cl = repo.changelog
1658 nodes = [cl.node(r) for r in revs]
1673 nodes = [cl.node(r) for r in revs]
1659
1674
1660 unfi = repo.unfiltered()
1675 unfi = repo.unfiltered()
1661 # find the filecache func directly
1676 # find the filecache func directly
1662 # This avoids polluting the benchmark with the filecache logic
1677 # This avoids polluting the benchmark with the filecache logic
1663 makecl = unfi.__class__.changelog.func
1678 makecl = unfi.__class__.changelog.func
1664
1679
1665 def setup():
1680 def setup():
1666 # probably not necessary, but for good measure
1681 # probably not necessary, but for good measure
1667 clearchangelog(unfi)
1682 clearchangelog(unfi)
1668
1683
1669 def d():
1684 def d():
1670 cl = makecl(unfi)
1685 cl = makecl(unfi)
1671 for n in nodes:
1686 for n in nodes:
1672 cl.rev(n)
1687 cl.rev(n)
1673
1688
1674 timer(d, setup=setup)
1689 timer(d, setup=setup)
1675 fm.end()
1690 fm.end()
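# Usage note (editor's illustration): --rev can be repeated and accepts
# revsets, matching the suggested test sets in the docstring above, while
# --no-lookup benchmarks index creation alone.
#
#   $ hg perf::index --rev tip --rev 0
#   $ hg perf::index --rev '-10000:'
#   $ hg perf::index --no-lookup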
1676
1691
1677
1692
1678 @command(
1693 @command(
1679 b'perf::nodemap|perfnodemap',
1694 b'perf::nodemap|perfnodemap',
1680 [
1695 [
1681 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1696 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1682 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1697 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1683 ]
1698 ]
1684 + formatteropts,
1699 + formatteropts,
1685 )
1700 )
1686 def perfnodemap(ui, repo, **opts):
1701 def perfnodemap(ui, repo, **opts):
1687 """benchmark the time necessary to look up revisions from a cold nodemap
1702 """benchmark the time necessary to look up revisions from a cold nodemap
1688
1703
1689 Depending on the implementation, the amount and order of revisions we look
1704 Depending on the implementation, the amount and order of revisions we look
1690 up can vary. Examples of useful sets to test:
1705 up can vary. Examples of useful sets to test:
1691 * tip
1706 * tip
1692 * 0
1707 * 0
1693 * -10:
1708 * -10:
1694 * :10
1709 * :10
1695 * -10: + :10
1710 * -10: + :10
1696 * :10: + -10:
1711 * :10: + -10:
1697 * -10000:
1712 * -10000:
1698 * -10000: + 0
1713 * -10000: + 0
1699
1714
1700 The command currently focuses on valid binary lookup. Benchmarking for
1715 The command currently focuses on valid binary lookup. Benchmarking for
1701 hexlookup, prefix lookup and missing lookup would also be valuable.
1716 hexlookup, prefix lookup and missing lookup would also be valuable.
1702 """
1717 """
1703 import mercurial.revlog
1718 import mercurial.revlog
1704
1719
1705 opts = _byteskwargs(opts)
1720 opts = _byteskwargs(opts)
1706 timer, fm = gettimer(ui, opts)
1721 timer, fm = gettimer(ui, opts)
1707 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1722 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1708
1723
1709 unfi = repo.unfiltered()
1724 unfi = repo.unfiltered()
1710 clearcaches = opts[b'clear_caches']
1725 clearcaches = opts[b'clear_caches']
1711 # find the filecache func directly
1726 # find the filecache func directly
1712 # This avoids polluting the benchmark with the filecache logic
1727 # This avoids polluting the benchmark with the filecache logic
1713 makecl = unfi.__class__.changelog.func
1728 makecl = unfi.__class__.changelog.func
1714 if not opts[b'rev']:
1729 if not opts[b'rev']:
1715 raise error.Abort(b'use --rev to specify revisions to look up')
1730 raise error.Abort(b'use --rev to specify revisions to look up')
1716 revs = scmutil.revrange(repo, opts[b'rev'])
1731 revs = scmutil.revrange(repo, opts[b'rev'])
1717 cl = repo.changelog
1732 cl = repo.changelog
1718 nodes = [cl.node(r) for r in revs]
1733 nodes = [cl.node(r) for r in revs]
1719
1734
1720 # use a list to pass reference to a nodemap from one closure to the next
1735 # use a list to pass reference to a nodemap from one closure to the next
1721 nodeget = [None]
1736 nodeget = [None]
1722
1737
1723 def setnodeget():
1738 def setnodeget():
1724 # probably not necessary, but for good measure
1739 # probably not necessary, but for good measure
1725 clearchangelog(unfi)
1740 clearchangelog(unfi)
1726 cl = makecl(unfi)
1741 cl = makecl(unfi)
1727 if util.safehasattr(cl.index, 'get_rev'):
1742 if util.safehasattr(cl.index, 'get_rev'):
1728 nodeget[0] = cl.index.get_rev
1743 nodeget[0] = cl.index.get_rev
1729 else:
1744 else:
1730 nodeget[0] = cl.nodemap.get
1745 nodeget[0] = cl.nodemap.get
1731
1746
1732 def d():
1747 def d():
1733 get = nodeget[0]
1748 get = nodeget[0]
1734 for n in nodes:
1749 for n in nodes:
1735 get(n)
1750 get(n)
1736
1751
1737 setup = None
1752 setup = None
1738 if clearcaches:
1753 if clearcaches:
1739
1754
1740 def setup():
1755 def setup():
1741 setnodeget()
1756 setnodeget()
1742
1757
1743 else:
1758 else:
1744 setnodeget()
1759 setnodeget()
1745 d() # prewarm the data structure
1760 d() # prewarm the data structure
1746 timer(d, setup=setup)
1761 timer(d, setup=setup)
1747 fm.end()
1762 fm.end()
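# Usage note (editor's illustration): unlike `perf::index`, at least one
# --rev is required here, and the revlog caches are cleared between runs by
# default (see the --clear-caches option above). The revsets are placeholders:
#
#   $ hg perf::nodemap --rev tip --rev 0
#   $ hg perf::nodemap --rev '-10000:'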
1748
1763
1749
1764
1750 @command(b'perf::startup|perfstartup', formatteropts)
1765 @command(b'perf::startup|perfstartup', formatteropts)
1751 def perfstartup(ui, repo, **opts):
1766 def perfstartup(ui, repo, **opts):
1752 opts = _byteskwargs(opts)
1767 opts = _byteskwargs(opts)
1753 timer, fm = gettimer(ui, opts)
1768 timer, fm = gettimer(ui, opts)
1754
1769
1755 def d():
1770 def d():
1756 if os.name != 'nt':
1771 if os.name != 'nt':
1757 os.system(
1772 os.system(
1758 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1773 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1759 )
1774 )
1760 else:
1775 else:
1761 os.environ['HGRCPATH'] = r' '
1776 os.environ['HGRCPATH'] = r' '
1762 os.system("%s version -q > NUL" % sys.argv[0])
1777 os.system("%s version -q > NUL" % sys.argv[0])
1763
1778
1764 timer(d)
1779 timer(d)
1765 fm.end()
1780 fm.end()
1766
1781
1767
1782
1768 @command(b'perf::parents|perfparents', formatteropts)
1783 @command(b'perf::parents|perfparents', formatteropts)
1769 def perfparents(ui, repo, **opts):
1784 def perfparents(ui, repo, **opts):
1770 """benchmark the time necessary to fetch one changeset's parents.
1785 """benchmark the time necessary to fetch one changeset's parents.
1771
1786
1772 The fetch is done using the `node identifier`, traversing all object layers
1787 The fetch is done using the `node identifier`, traversing all object layers
1773 from the repository object. The first N revisions will be used for this
1788 from the repository object. The first N revisions will be used for this
1774 benchmark. N is controlled by the ``perf.parentscount`` config option
1789 benchmark. N is controlled by the ``perf.parentscount`` config option
1775 (default: 1000).
1790 (default: 1000).
1776 """
1791 """
1777 opts = _byteskwargs(opts)
1792 opts = _byteskwargs(opts)
1778 timer, fm = gettimer(ui, opts)
1793 timer, fm = gettimer(ui, opts)
1779 # control the number of commits perfparents iterates over
1794 # control the number of commits perfparents iterates over
1780 # experimental config: perf.parentscount
1795 # experimental config: perf.parentscount
1781 count = getint(ui, b"perf", b"parentscount", 1000)
1796 count = getint(ui, b"perf", b"parentscount", 1000)
1782 if len(repo.changelog) < count:
1797 if len(repo.changelog) < count:
1783 raise error.Abort(b"repo needs %d commits for this test" % count)
1798 raise error.Abort(b"repo needs %d commits for this test" % count)
1784 repo = repo.unfiltered()
1799 repo = repo.unfiltered()
1785 nl = [repo.changelog.node(i) for i in _xrange(count)]
1800 nl = [repo.changelog.node(i) for i in _xrange(count)]
1786
1801
1787 def d():
1802 def d():
1788 for n in nl:
1803 for n in nl:
1789 repo.changelog.parents(n)
1804 repo.changelog.parents(n)
1790
1805
1791 timer(d)
1806 timer(d)
1792 fm.end()
1807 fm.end()
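# Usage note (editor's illustration): the number of revisions visited is
# driven by the experimental `perf.parentscount` config knob rather than a
# command option, so small repositories need it lowered explicitly:
#
#   $ hg perf::parents --config perf.parentscount=100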
1793
1808
1794
1809
1795 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1810 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1796 def perfctxfiles(ui, repo, x, **opts):
1811 def perfctxfiles(ui, repo, x, **opts):
1797 opts = _byteskwargs(opts)
1812 opts = _byteskwargs(opts)
1798 x = int(x)
1813 x = int(x)
1799 timer, fm = gettimer(ui, opts)
1814 timer, fm = gettimer(ui, opts)
1800
1815
1801 def d():
1816 def d():
1802 len(repo[x].files())
1817 len(repo[x].files())
1803
1818
1804 timer(d)
1819 timer(d)
1805 fm.end()
1820 fm.end()
1806
1821
1807
1822
1808 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1823 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1809 def perfrawfiles(ui, repo, x, **opts):
1824 def perfrawfiles(ui, repo, x, **opts):
1810 opts = _byteskwargs(opts)
1825 opts = _byteskwargs(opts)
1811 x = int(x)
1826 x = int(x)
1812 timer, fm = gettimer(ui, opts)
1827 timer, fm = gettimer(ui, opts)
1813 cl = repo.changelog
1828 cl = repo.changelog
1814
1829
1815 def d():
1830 def d():
1816 len(cl.read(x)[3])
1831 len(cl.read(x)[3])
1817
1832
1818 timer(d)
1833 timer(d)
1819 fm.end()
1834 fm.end()
1820
1835
1821
1836
1822 @command(b'perf::lookup|perflookup', formatteropts)
1837 @command(b'perf::lookup|perflookup', formatteropts)
1823 def perflookup(ui, repo, rev, **opts):
1838 def perflookup(ui, repo, rev, **opts):
1824 opts = _byteskwargs(opts)
1839 opts = _byteskwargs(opts)
1825 timer, fm = gettimer(ui, opts)
1840 timer, fm = gettimer(ui, opts)
1826 timer(lambda: len(repo.lookup(rev)))
1841 timer(lambda: len(repo.lookup(rev)))
1827 fm.end()
1842 fm.end()
1828
1843
1829
1844
1830 @command(
1845 @command(
1831 b'perf::linelogedits|perflinelogedits',
1846 b'perf::linelogedits|perflinelogedits',
1832 [
1847 [
1833 (b'n', b'edits', 10000, b'number of edits'),
1848 (b'n', b'edits', 10000, b'number of edits'),
1834 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1849 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1835 ],
1850 ],
1836 norepo=True,
1851 norepo=True,
1837 )
1852 )
1838 def perflinelogedits(ui, **opts):
1853 def perflinelogedits(ui, **opts):
1839 from mercurial import linelog
1854 from mercurial import linelog
1840
1855
1841 opts = _byteskwargs(opts)
1856 opts = _byteskwargs(opts)
1842
1857
1843 edits = opts[b'edits']
1858 edits = opts[b'edits']
1844 maxhunklines = opts[b'max_hunk_lines']
1859 maxhunklines = opts[b'max_hunk_lines']
1845
1860
1846 maxb1 = 100000
1861 maxb1 = 100000
1847 random.seed(0)
1862 random.seed(0)
1848 randint = random.randint
1863 randint = random.randint
1849 currentlines = 0
1864 currentlines = 0
1850 arglist = []
1865 arglist = []
1851 for rev in _xrange(edits):
1866 for rev in _xrange(edits):
1852 a1 = randint(0, currentlines)
1867 a1 = randint(0, currentlines)
1853 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1868 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1854 b1 = randint(0, maxb1)
1869 b1 = randint(0, maxb1)
1855 b2 = randint(b1, b1 + maxhunklines)
1870 b2 = randint(b1, b1 + maxhunklines)
1856 currentlines += (b2 - b1) - (a2 - a1)
1871 currentlines += (b2 - b1) - (a2 - a1)
1857 arglist.append((rev, a1, a2, b1, b2))
1872 arglist.append((rev, a1, a2, b1, b2))
1858
1873
1859 def d():
1874 def d():
1860 ll = linelog.linelog()
1875 ll = linelog.linelog()
1861 for args in arglist:
1876 for args in arglist:
1862 ll.replacelines(*args)
1877 ll.replacelines(*args)
1863
1878
1864 timer, fm = gettimer(ui, opts)
1879 timer, fm = gettimer(ui, opts)
1865 timer(d)
1880 timer(d)
1866 fm.end()
1881 fm.end()
1867
1882
1868
1883
1869 @command(b'perf::revrange|perfrevrange', formatteropts)
1884 @command(b'perf::revrange|perfrevrange', formatteropts)
1870 def perfrevrange(ui, repo, *specs, **opts):
1885 def perfrevrange(ui, repo, *specs, **opts):
1871 opts = _byteskwargs(opts)
1886 opts = _byteskwargs(opts)
1872 timer, fm = gettimer(ui, opts)
1887 timer, fm = gettimer(ui, opts)
1873 revrange = scmutil.revrange
1888 revrange = scmutil.revrange
1874 timer(lambda: len(revrange(repo, specs)))
1889 timer(lambda: len(revrange(repo, specs)))
1875 fm.end()
1890 fm.end()
1876
1891
1877
1892
1878 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
1893 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
1879 def perfnodelookup(ui, repo, rev, **opts):
1894 def perfnodelookup(ui, repo, rev, **opts):
1880 opts = _byteskwargs(opts)
1895 opts = _byteskwargs(opts)
1881 timer, fm = gettimer(ui, opts)
1896 timer, fm = gettimer(ui, opts)
1882 import mercurial.revlog
1897 import mercurial.revlog
1883
1898
1884 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1899 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1885 n = scmutil.revsingle(repo, rev).node()
1900 n = scmutil.revsingle(repo, rev).node()
1886
1901
1887 try:
1902 try:
1888 cl = revlog(getsvfs(repo), radix=b"00changelog")
1903 cl = revlog(getsvfs(repo), radix=b"00changelog")
1889 except TypeError:
1904 except TypeError:
1890 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
1905 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
1891
1906
1892 def d():
1907 def d():
1893 cl.rev(n)
1908 cl.rev(n)
1894 clearcaches(cl)
1909 clearcaches(cl)
1895
1910
1896 timer(d)
1911 timer(d)
1897 fm.end()
1912 fm.end()
1898
1913
1899
1914
1900 @command(
1915 @command(
1901 b'perf::log|perflog',
1916 b'perf::log|perflog',
1902 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1917 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1903 )
1918 )
1904 def perflog(ui, repo, rev=None, **opts):
1919 def perflog(ui, repo, rev=None, **opts):
1905 opts = _byteskwargs(opts)
1920 opts = _byteskwargs(opts)
1906 if rev is None:
1921 if rev is None:
1907 rev = []
1922 rev = []
1908 timer, fm = gettimer(ui, opts)
1923 timer, fm = gettimer(ui, opts)
1909 ui.pushbuffer()
1924 ui.pushbuffer()
1910 timer(
1925 timer(
1911 lambda: commands.log(
1926 lambda: commands.log(
1912 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1927 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1913 )
1928 )
1914 )
1929 )
1915 ui.popbuffer()
1930 ui.popbuffer()
1916 fm.end()
1931 fm.end()
1917
1932
1918
1933
1919 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
1934 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
1920 def perfmoonwalk(ui, repo, **opts):
1935 def perfmoonwalk(ui, repo, **opts):
1921 """benchmark walking the changelog backwards
1936 """benchmark walking the changelog backwards
1922
1937
1923 This also loads the changelog data for each revision in the changelog.
1938 This also loads the changelog data for each revision in the changelog.
1924 """
1939 """
1925 opts = _byteskwargs(opts)
1940 opts = _byteskwargs(opts)
1926 timer, fm = gettimer(ui, opts)
1941 timer, fm = gettimer(ui, opts)
1927
1942
1928 def moonwalk():
1943 def moonwalk():
1929 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1944 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1930 ctx = repo[i]
1945 ctx = repo[i]
1931 ctx.branch() # read changelog data (in addition to the index)
1946 ctx.branch() # read changelog data (in addition to the index)
1932
1947
1933 timer(moonwalk)
1948 timer(moonwalk)
1934 fm.end()
1949 fm.end()
1935
1950
1936
1951
1937 @command(
1952 @command(
1938 b'perf::templating|perftemplating',
1953 b'perf::templating|perftemplating',
1939 [
1954 [
1940 (b'r', b'rev', [], b'revisions to run the template on'),
1955 (b'r', b'rev', [], b'revisions to run the template on'),
1941 ]
1956 ]
1942 + formatteropts,
1957 + formatteropts,
1943 )
1958 )
1944 def perftemplating(ui, repo, testedtemplate=None, **opts):
1959 def perftemplating(ui, repo, testedtemplate=None, **opts):
1945 """test the rendering time of a given template"""
1960 """test the rendering time of a given template"""
1946 if makelogtemplater is None:
1961 if makelogtemplater is None:
1947 raise error.Abort(
1962 raise error.Abort(
1948 b"perftemplating not available with this Mercurial",
1963 b"perftemplating not available with this Mercurial",
1949 hint=b"use 4.3 or later",
1964 hint=b"use 4.3 or later",
1950 )
1965 )
1951
1966
1952 opts = _byteskwargs(opts)
1967 opts = _byteskwargs(opts)
1953
1968
1954 nullui = ui.copy()
1969 nullui = ui.copy()
1955 nullui.fout = open(os.devnull, 'wb')
1970 nullui.fout = open(os.devnull, 'wb')
1956 nullui.disablepager()
1971 nullui.disablepager()
1957 revs = opts.get(b'rev')
1972 revs = opts.get(b'rev')
1958 if not revs:
1973 if not revs:
1959 revs = [b'all()']
1974 revs = [b'all()']
1960 revs = list(scmutil.revrange(repo, revs))
1975 revs = list(scmutil.revrange(repo, revs))
1961
1976
1962 defaulttemplate = (
1977 defaulttemplate = (
1963 b'{date|shortdate} [{rev}:{node|short}]'
1978 b'{date|shortdate} [{rev}:{node|short}]'
1964 b' {author|person}: {desc|firstline}\n'
1979 b' {author|person}: {desc|firstline}\n'
1965 )
1980 )
1966 if testedtemplate is None:
1981 if testedtemplate is None:
1967 testedtemplate = defaulttemplate
1982 testedtemplate = defaulttemplate
1968 displayer = makelogtemplater(nullui, repo, testedtemplate)
1983 displayer = makelogtemplater(nullui, repo, testedtemplate)
1969
1984
1970 def format():
1985 def format():
1971 for r in revs:
1986 for r in revs:
1972 ctx = repo[r]
1987 ctx = repo[r]
1973 displayer.show(ctx)
1988 displayer.show(ctx)
1974 displayer.flush(ctx)
1989 displayer.flush(ctx)
1975
1990
1976 timer, fm = gettimer(ui, opts)
1991 timer, fm = gettimer(ui, opts)
1977 timer(format)
1992 timer(format)
1978 fm.end()
1993 fm.end()
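# Usage note (editor's illustration): the template is passed as a positional
# argument and defaults to the short log-like template defined above; the
# revset below is a placeholder.
#
#   $ hg perf::templating -r 'last(all(), 1000)' '{rev}:{node|short}\n'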
1979
1994
1980
1995
1981 def _displaystats(ui, opts, entries, data):
1996 def _displaystats(ui, opts, entries, data):
1982 # use a second formatter because the data are quite different, not sure
1997 # use a second formatter because the data are quite different, not sure
1983 # how it flies with the templater.
1998 # how it flies with the templater.
1984 fm = ui.formatter(b'perf-stats', opts)
1999 fm = ui.formatter(b'perf-stats', opts)
1985 for key, title in entries:
2000 for key, title in entries:
1986 values = data[key]
2001 values = data[key]
1987 nbvalues = len(values)
2002 nbvalues = len(values)
1988 values.sort()
2003 values.sort()
1989 stats = {
2004 stats = {
1990 'key': key,
2005 'key': key,
1991 'title': title,
2006 'title': title,
1992 'nbitems': len(values),
2007 'nbitems': len(values),
1993 'min': values[0][0],
2008 'min': values[0][0],
1994 '10%': values[(nbvalues * 10) // 100][0],
2009 '10%': values[(nbvalues * 10) // 100][0],
1995 '25%': values[(nbvalues * 25) // 100][0],
2010 '25%': values[(nbvalues * 25) // 100][0],
1996 '50%': values[(nbvalues * 50) // 100][0],
2011 '50%': values[(nbvalues * 50) // 100][0],
1997 '75%': values[(nbvalues * 75) // 100][0],
2012 '75%': values[(nbvalues * 75) // 100][0],
1998 '80%': values[(nbvalues * 80) // 100][0],
2013 '80%': values[(nbvalues * 80) // 100][0],
1999 '85%': values[(nbvalues * 85) // 100][0],
2014 '85%': values[(nbvalues * 85) // 100][0],
2000 '90%': values[(nbvalues * 90) // 100][0],
2015 '90%': values[(nbvalues * 90) // 100][0],
2001 '95%': values[(nbvalues * 95) // 100][0],
2016 '95%': values[(nbvalues * 95) // 100][0],
2002 '99%': values[(nbvalues * 99) // 100][0],
2017 '99%': values[(nbvalues * 99) // 100][0],
2003 'max': values[-1][0],
2018 'max': values[-1][0],
2004 }
2019 }
2005 fm.startitem()
2020 fm.startitem()
2006 fm.data(**stats)
2021 fm.data(**stats)
2007 # make node pretty for the human output
2022 # make node pretty for the human output
2008 fm.plain('### %s (%d items)\n' % (title, len(values)))
2023 fm.plain('### %s (%d items)\n' % (title, len(values)))
2009 lines = [
2024 lines = [
2010 'min',
2025 'min',
2011 '10%',
2026 '10%',
2012 '25%',
2027 '25%',
2013 '50%',
2028 '50%',
2014 '75%',
2029 '75%',
2015 '80%',
2030 '80%',
2016 '85%',
2031 '85%',
2017 '90%',
2032 '90%',
2018 '95%',
2033 '95%',
2019 '99%',
2034 '99%',
2020 'max',
2035 'max',
2021 ]
2036 ]
2022 for l in lines:
2037 for l in lines:
2023 fm.plain('%s: %s\n' % (l, stats[l]))
2038 fm.plain('%s: %s\n' % (l, stats[l]))
2024 fm.end()
2039 fm.end()
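# Illustrative sketch (editor's addition, not used by the extension): how the
# percentile rows printed above are derived from a sorted list of
# (measurement, ...) tuples. The helper name is hypothetical; the 'min' and
# 'max' rows are simply the two ends of the sorted list.
def _example_percentile(values, pct):
    """Return the measurement at the given percentile of a sorted list."""
    # mirrors the `values[(nbvalues * pct) // 100][0]` indexing used above
    return values[(len(values) * pct) // 100][0]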
2025
2040
2026
2041
2027 @command(
2042 @command(
2028 b'perf::helper-mergecopies|perfhelper-mergecopies',
2043 b'perf::helper-mergecopies|perfhelper-mergecopies',
2029 formatteropts
2044 formatteropts
2030 + [
2045 + [
2031 (b'r', b'revs', [], b'restrict search to these revisions'),
2046 (b'r', b'revs', [], b'restrict search to these revisions'),
2032 (b'', b'timing', False, b'provides extra data (costly)'),
2047 (b'', b'timing', False, b'provides extra data (costly)'),
2033 (b'', b'stats', False, b'provides statistic about the measured data'),
2048 (b'', b'stats', False, b'provides statistic about the measured data'),
2034 ],
2049 ],
2035 )
2050 )
2036 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2051 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2037 """find statistics about potential parameters for `perfmergecopies`
2052 """find statistics about potential parameters for `perfmergecopies`
2038
2053
2039 This command finds (base, p1, p2) triplets relevant for copytracing
2054 This command finds (base, p1, p2) triplets relevant for copytracing
2040 benchmarking in the context of a merge. It reports values for some of the
2055 benchmarking in the context of a merge. It reports values for some of the
2041 parameters that impact merge copy tracing time during merge.
2056 parameters that impact merge copy tracing time during merge.
2042
2057
2043 If `--timing` is set, rename detection is run and the associated timing
2058 If `--timing` is set, rename detection is run and the associated timing
2044 will be reported. The extra details come at the cost of slower command
2059 will be reported. The extra details come at the cost of slower command
2045 execution.
2060 execution.
2046
2061
2047 Since rename detection is only run once, other factors might easily
2062 Since rename detection is only run once, other factors might easily
2048 affect the precision of the timing. However, it should give a good
2063 affect the precision of the timing. However, it should give a good
2049 approximation of which revision triplets are very costly.
2064 approximation of which revision triplets are very costly.
2050 """
2065 """
2051 opts = _byteskwargs(opts)
2066 opts = _byteskwargs(opts)
2052 fm = ui.formatter(b'perf', opts)
2067 fm = ui.formatter(b'perf', opts)
2053 dotiming = opts[b'timing']
2068 dotiming = opts[b'timing']
2054 dostats = opts[b'stats']
2069 dostats = opts[b'stats']
2055
2070
2056 output_template = [
2071 output_template = [
2057 ("base", "%(base)12s"),
2072 ("base", "%(base)12s"),
2058 ("p1", "%(p1.node)12s"),
2073 ("p1", "%(p1.node)12s"),
2059 ("p2", "%(p2.node)12s"),
2074 ("p2", "%(p2.node)12s"),
2060 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2075 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2061 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2076 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2062 ("p1.renames", "%(p1.renamedfiles)12d"),
2077 ("p1.renames", "%(p1.renamedfiles)12d"),
2063 ("p1.time", "%(p1.time)12.3f"),
2078 ("p1.time", "%(p1.time)12.3f"),
2064 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2079 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2065 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2080 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2066 ("p2.renames", "%(p2.renamedfiles)12d"),
2081 ("p2.renames", "%(p2.renamedfiles)12d"),
2067 ("p2.time", "%(p2.time)12.3f"),
2082 ("p2.time", "%(p2.time)12.3f"),
2068 ("renames", "%(nbrenamedfiles)12d"),
2083 ("renames", "%(nbrenamedfiles)12d"),
2069 ("total.time", "%(time)12.3f"),
2084 ("total.time", "%(time)12.3f"),
2070 ]
2085 ]
2071 if not dotiming:
2086 if not dotiming:
2072 output_template = [
2087 output_template = [
2073 i
2088 i
2074 for i in output_template
2089 for i in output_template
2075 if not ('time' in i[0] or 'renames' in i[0])
2090 if not ('time' in i[0] or 'renames' in i[0])
2076 ]
2091 ]
2077 header_names = [h for (h, v) in output_template]
2092 header_names = [h for (h, v) in output_template]
2078 output = ' '.join([v for (h, v) in output_template]) + '\n'
2093 output = ' '.join([v for (h, v) in output_template]) + '\n'
2079 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2094 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2080 fm.plain(header % tuple(header_names))
2095 fm.plain(header % tuple(header_names))
2081
2096
2082 if not revs:
2097 if not revs:
2083 revs = ['all()']
2098 revs = ['all()']
2084 revs = scmutil.revrange(repo, revs)
2099 revs = scmutil.revrange(repo, revs)
2085
2100
2086 if dostats:
2101 if dostats:
2087 alldata = {
2102 alldata = {
2088 'nbrevs': [],
2103 'nbrevs': [],
2089 'nbmissingfiles': [],
2104 'nbmissingfiles': [],
2090 }
2105 }
2091 if dotiming:
2106 if dotiming:
2092 alldata['parentnbrenames'] = []
2107 alldata['parentnbrenames'] = []
2093 alldata['totalnbrenames'] = []
2108 alldata['totalnbrenames'] = []
2094 alldata['parenttime'] = []
2109 alldata['parenttime'] = []
2095 alldata['totaltime'] = []
2110 alldata['totaltime'] = []
2096
2111
2097 roi = repo.revs('merge() and %ld', revs)
2112 roi = repo.revs('merge() and %ld', revs)
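# only merge revisions are of interest here: a (base, p1, p2) triplet needs
# two parents, so non-merge revisions in the requested set are skipped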
2098 for r in roi:
2113 for r in roi:
2099 ctx = repo[r]
2114 ctx = repo[r]
2100 p1 = ctx.p1()
2115 p1 = ctx.p1()
2101 p2 = ctx.p2()
2116 p2 = ctx.p2()
2102 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2117 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2103 for b in bases:
2118 for b in bases:
2104 b = repo[b]
2119 b = repo[b]
2105 p1missing = copies._computeforwardmissing(b, p1)
2120 p1missing = copies._computeforwardmissing(b, p1)
2106 p2missing = copies._computeforwardmissing(b, p2)
2121 p2missing = copies._computeforwardmissing(b, p2)
2107 data = {
2122 data = {
2108 b'base': b.hex(),
2123 b'base': b.hex(),
2109 b'p1.node': p1.hex(),
2124 b'p1.node': p1.hex(),
2110 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2125 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2111 b'p1.nbmissingfiles': len(p1missing),
2126 b'p1.nbmissingfiles': len(p1missing),
2112 b'p2.node': p2.hex(),
2127 b'p2.node': p2.hex(),
2113 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2128 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2114 b'p2.nbmissingfiles': len(p2missing),
2129 b'p2.nbmissingfiles': len(p2missing),
2115 }
2130 }
2116 if dostats:
2131 if dostats:
2117 if p1missing:
2132 if p1missing:
2118 alldata['nbrevs'].append(
2133 alldata['nbrevs'].append(
2119 (data['p1.nbrevs'], b.hex(), p1.hex())
2134 (data['p1.nbrevs'], b.hex(), p1.hex())
2120 )
2135 )
2121 alldata['nbmissingfiles'].append(
2136 alldata['nbmissingfiles'].append(
2122 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2137 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2123 )
2138 )
2124 if p2missing:
2139 if p2missing:
2125 alldata['nbrevs'].append(
2140 alldata['nbrevs'].append(
2126 (data['p2.nbrevs'], b.hex(), p2.hex())
2141 (data['p2.nbrevs'], b.hex(), p2.hex())
2127 )
2142 )
2128 alldata['nbmissingfiles'].append(
2143 alldata['nbmissingfiles'].append(
2129 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2144 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2130 )
2145 )
2131 if dotiming:
2146 if dotiming:
2132 begin = util.timer()
2147 begin = util.timer()
2133 mergedata = copies.mergecopies(repo, p1, p2, b)
2148 mergedata = copies.mergecopies(repo, p1, p2, b)
2134 end = util.timer()
2149 end = util.timer()
2135 # not very stable timing since we did only one run
2150 # not very stable timing since we did only one run
2136 data['time'] = end - begin
2151 data['time'] = end - begin
2137 # mergedata contains five dicts: "copy", "movewithdir",
2152 # mergedata contains five dicts: "copy", "movewithdir",
2138 # "diverge", "renamedelete" and "dirmove".
2153 # "diverge", "renamedelete" and "dirmove".
2139 # The first 4 are about renamed files, so let's count those.
2154 # The first 4 are about renamed files, so let's count those.
2140 renames = len(mergedata[0])
2155 renames = len(mergedata[0])
2141 renames += len(mergedata[1])
2156 renames += len(mergedata[1])
2142 renames += len(mergedata[2])
2157 renames += len(mergedata[2])
2143 renames += len(mergedata[3])
2158 renames += len(mergedata[3])
2144 data['nbrenamedfiles'] = renames
2159 data['nbrenamedfiles'] = renames
2145 begin = util.timer()
2160 begin = util.timer()
2146 p1renames = copies.pathcopies(b, p1)
2161 p1renames = copies.pathcopies(b, p1)
2147 end = util.timer()
2162 end = util.timer()
2148 data['p1.time'] = end - begin
2163 data['p1.time'] = end - begin
2149 begin = util.timer()
2164 begin = util.timer()
2150 p2renames = copies.pathcopies(b, p2)
2165 p2renames = copies.pathcopies(b, p2)
2151 end = util.timer()
2166 end = util.timer()
2152 data['p2.time'] = end - begin
2167 data['p2.time'] = end - begin
2153 data['p1.renamedfiles'] = len(p1renames)
2168 data['p1.renamedfiles'] = len(p1renames)
2154 data['p2.renamedfiles'] = len(p2renames)
2169 data['p2.renamedfiles'] = len(p2renames)
2155
2170
2156 if dostats:
2171 if dostats:
2157 if p1missing:
2172 if p1missing:
2158 alldata['parentnbrenames'].append(
2173 alldata['parentnbrenames'].append(
2159 (data['p1.renamedfiles'], b.hex(), p1.hex())
2174 (data['p1.renamedfiles'], b.hex(), p1.hex())
2160 )
2175 )
2161 alldata['parenttime'].append(
2176 alldata['parenttime'].append(
2162 (data['p1.time'], b.hex(), p1.hex())
2177 (data['p1.time'], b.hex(), p1.hex())
2163 )
2178 )
2164 if p2missing:
2179 if p2missing:
2165 alldata['parentnbrenames'].append(
2180 alldata['parentnbrenames'].append(
2166 (data['p2.renamedfiles'], b.hex(), p2.hex())
2181 (data['p2.renamedfiles'], b.hex(), p2.hex())
2167 )
2182 )
2168 alldata['parenttime'].append(
2183 alldata['parenttime'].append(
2169 (data['p2.time'], b.hex(), p2.hex())
2184 (data['p2.time'], b.hex(), p2.hex())
2170 )
2185 )
2171 if p1missing or p2missing:
2186 if p1missing or p2missing:
2172 alldata['totalnbrenames'].append(
2187 alldata['totalnbrenames'].append(
2173 (
2188 (
2174 data['nbrenamedfiles'],
2189 data['nbrenamedfiles'],
2175 b.hex(),
2190 b.hex(),
2176 p1.hex(),
2191 p1.hex(),
2177 p2.hex(),
2192 p2.hex(),
2178 )
2193 )
2179 )
2194 )
2180 alldata['totaltime'].append(
2195 alldata['totaltime'].append(
2181 (data['time'], b.hex(), p1.hex(), p2.hex())
2196 (data['time'], b.hex(), p1.hex(), p2.hex())
2182 )
2197 )
2183 fm.startitem()
2198 fm.startitem()
2184 fm.data(**data)
2199 fm.data(**data)
2185 # make node pretty for the human output
2200 # make node pretty for the human output
2186 out = data.copy()
2201 out = data.copy()
2187 out['base'] = fm.hexfunc(b.node())
2202 out['base'] = fm.hexfunc(b.node())
2188 out['p1.node'] = fm.hexfunc(p1.node())
2203 out['p1.node'] = fm.hexfunc(p1.node())
2189 out['p2.node'] = fm.hexfunc(p2.node())
2204 out['p2.node'] = fm.hexfunc(p2.node())
2190 fm.plain(output % out)
2205 fm.plain(output % out)
2191
2206
2192 fm.end()
2207 fm.end()
2193 if dostats:
2208 if dostats:
2194 # use a second formatter because the data are quite different, not sure
2209 # use a second formatter because the data are quite different, not sure
2195 # how it flies with the templater.
2210 # how it flies with the templater.
2196 entries = [
2211 entries = [
2197 ('nbrevs', 'number of revisions covered'),
2212 ('nbrevs', 'number of revisions covered'),
2198 ('nbmissingfiles', 'number of missing files at head'),
2213 ('nbmissingfiles', 'number of missing files at head'),
2199 ]
2214 ]
2200 if dotiming:
2215 if dotiming:
2201 entries.append(
2216 entries.append(
2202 ('parentnbrenames', 'renames from one parent to base')
2217 ('parentnbrenames', 'renames from one parent to base')
2203 )
2218 )
2204 entries.append(('totalnbrenames', 'total number of renames'))
2219 entries.append(('totalnbrenames', 'total number of renames'))
2205 entries.append(('parenttime', 'time for one parent'))
2220 entries.append(('parenttime', 'time for one parent'))
2206 entries.append(('totaltime', 'time for both parents'))
2221 entries.append(('totaltime', 'time for both parents'))
2207 _displaystats(ui, opts, entries, alldata)
2222 _displaystats(ui, opts, entries, alldata)
2208
2223
2209
2224
2210 @command(
2225 @command(
2211 b'perf::helper-pathcopies|perfhelper-pathcopies',
2226 b'perf::helper-pathcopies|perfhelper-pathcopies',
2212 formatteropts
2227 formatteropts
2213 + [
2228 + [
2214 (b'r', b'revs', [], b'restrict search to these revisions'),
2229 (b'r', b'revs', [], b'restrict search to these revisions'),
2215 (b'', b'timing', False, b'provides extra data (costly)'),
2230 (b'', b'timing', False, b'provides extra data (costly)'),
2216 (b'', b'stats', False, b'provides statistic about the measured data'),
2231 (b'', b'stats', False, b'provides statistic about the measured data'),
2217 ],
2232 ],
2218 )
2233 )
2219 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2234 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2220 """find statistic about potential parameters for the `perftracecopies`
2235 """find statistic about potential parameters for the `perftracecopies`
2221
2236
2222 This command finds source-destination pairs relevant for copytracing testing.
2237 This command finds source-destination pairs relevant for copytracing testing.
2223 It reports values for some of the parameters that impact copy tracing time.
2238 It reports values for some of the parameters that impact copy tracing time.
2224
2239
2225 If `--timing` is set, rename detection is run and the associated timing
2240 If `--timing` is set, rename detection is run and the associated timing
2226 will be reported. The extra details come at the cost of slower command
2241 will be reported. The extra details come at the cost of slower command
2227 execution.
2242 execution.
2228
2243
2229 Since the rename detection is only run once, other factors might easily
2244 Since the rename detection is only run once, other factors might easily
2230 affect the precision of the timing. However, it should give a good
2245 affect the precision of the timing. However, it should give a good
2231 approximation of which revision pairs are very costly.
2246 approximation of which revision pairs are very costly.
2232 """
2247 """
2233 opts = _byteskwargs(opts)
2248 opts = _byteskwargs(opts)
2234 fm = ui.formatter(b'perf', opts)
2249 fm = ui.formatter(b'perf', opts)
2235 dotiming = opts[b'timing']
2250 dotiming = opts[b'timing']
2236 dostats = opts[b'stats']
2251 dostats = opts[b'stats']
2237
2252
2238 if dotiming:
2253 if dotiming:
2239 header = '%12s %12s %12s %12s %12s %12s\n'
2254 header = '%12s %12s %12s %12s %12s %12s\n'
2240 output = (
2255 output = (
2241 "%(source)12s %(destination)12s "
2256 "%(source)12s %(destination)12s "
2242 "%(nbrevs)12d %(nbmissingfiles)12d "
2257 "%(nbrevs)12d %(nbmissingfiles)12d "
2243 "%(nbrenamedfiles)12d %(time)18.5f\n"
2258 "%(nbrenamedfiles)12d %(time)18.5f\n"
2244 )
2259 )
2245 header_names = (
2260 header_names = (
2246 "source",
2261 "source",
2247 "destination",
2262 "destination",
2248 "nb-revs",
2263 "nb-revs",
2249 "nb-files",
2264 "nb-files",
2250 "nb-renames",
2265 "nb-renames",
2251 "time",
2266 "time",
2252 )
2267 )
2253 fm.plain(header % header_names)
2268 fm.plain(header % header_names)
2254 else:
2269 else:
2255 header = '%12s %12s %12s %12s\n'
2270 header = '%12s %12s %12s %12s\n'
2256 output = (
2271 output = (
2257 "%(source)12s %(destination)12s "
2272 "%(source)12s %(destination)12s "
2258 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2273 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2259 )
2274 )
2260 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2275 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2261
2276
2262 if not revs:
2277 if not revs:
2263 revs = ['all()']
2278 revs = ['all()']
2264 revs = scmutil.revrange(repo, revs)
2279 revs = scmutil.revrange(repo, revs)
2265
2280
2266 if dostats:
2281 if dostats:
2267 alldata = {
2282 alldata = {
2268 'nbrevs': [],
2283 'nbrevs': [],
2269 'nbmissingfiles': [],
2284 'nbmissingfiles': [],
2270 }
2285 }
2271 if dotiming:
2286 if dotiming:
2272 alldata['nbrenames'] = []
2287 alldata['nbrenames'] = []
2273 alldata['time'] = []
2288 alldata['time'] = []
2274
2289
2275 roi = repo.revs('merge() and %ld', revs)
2290 roi = repo.revs('merge() and %ld', revs)
2276 for r in roi:
2291 for r in roi:
2277 ctx = repo[r]
2292 ctx = repo[r]
2278 p1 = ctx.p1().rev()
2293 p1 = ctx.p1().rev()
2279 p2 = ctx.p2().rev()
2294 p2 = ctx.p2().rev()
2280 bases = repo.changelog._commonancestorsheads(p1, p2)
2295 bases = repo.changelog._commonancestorsheads(p1, p2)
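# every (common ancestor head, parent) combination is a candidate
# source/destination pair for path copy tracing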
2281 for p in (p1, p2):
2296 for p in (p1, p2):
2282 for b in bases:
2297 for b in bases:
2283 base = repo[b]
2298 base = repo[b]
2284 parent = repo[p]
2299 parent = repo[p]
2285 missing = copies._computeforwardmissing(base, parent)
2300 missing = copies._computeforwardmissing(base, parent)
2286 if not missing:
2301 if not missing:
2287 continue
2302 continue
2288 data = {
2303 data = {
2289 b'source': base.hex(),
2304 b'source': base.hex(),
2290 b'destination': parent.hex(),
2305 b'destination': parent.hex(),
2291 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2306 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2292 b'nbmissingfiles': len(missing),
2307 b'nbmissingfiles': len(missing),
2293 }
2308 }
2294 if dostats:
2309 if dostats:
2295 alldata['nbrevs'].append(
2310 alldata['nbrevs'].append(
2296 (
2311 (
2297 data['nbrevs'],
2312 data['nbrevs'],
2298 base.hex(),
2313 base.hex(),
2299 parent.hex(),
2314 parent.hex(),
2300 )
2315 )
2301 )
2316 )
2302 alldata['nbmissingfiles'].append(
2317 alldata['nbmissingfiles'].append(
2303 (
2318 (
2304 data['nbmissingfiles'],
2319 data['nbmissingfiles'],
2305 base.hex(),
2320 base.hex(),
2306 parent.hex(),
2321 parent.hex(),
2307 )
2322 )
2308 )
2323 )
2309 if dotiming:
2324 if dotiming:
2310 begin = util.timer()
2325 begin = util.timer()
2311 renames = copies.pathcopies(base, parent)
2326 renames = copies.pathcopies(base, parent)
2312 end = util.timer()
2327 end = util.timer()
2313 # not very stable timing since we did only one run
2328 # not very stable timing since we did only one run
2314 data['time'] = end - begin
2329 data['time'] = end - begin
2315 data['nbrenamedfiles'] = len(renames)
2330 data['nbrenamedfiles'] = len(renames)
2316 if dostats:
2331 if dostats:
2317 alldata['time'].append(
2332 alldata['time'].append(
2318 (
2333 (
2319 data['time'],
2334 data['time'],
2320 base.hex(),
2335 base.hex(),
2321 parent.hex(),
2336 parent.hex(),
2322 )
2337 )
2323 )
2338 )
2324 alldata['nbrenames'].append(
2339 alldata['nbrenames'].append(
2325 (
2340 (
2326 data['nbrenamedfiles'],
2341 data['nbrenamedfiles'],
2327 base.hex(),
2342 base.hex(),
2328 parent.hex(),
2343 parent.hex(),
2329 )
2344 )
2330 )
2345 )
2331 fm.startitem()
2346 fm.startitem()
2332 fm.data(**data)
2347 fm.data(**data)
2333 out = data.copy()
2348 out = data.copy()
2334 out['source'] = fm.hexfunc(base.node())
2349 out['source'] = fm.hexfunc(base.node())
2335 out['destination'] = fm.hexfunc(parent.node())
2350 out['destination'] = fm.hexfunc(parent.node())
2336 fm.plain(output % out)
2351 fm.plain(output % out)
2337
2352
2338 fm.end()
2353 fm.end()
2339 if dostats:
2354 if dostats:
2340 entries = [
2355 entries = [
2341 ('nbrevs', 'number of revisions covered'),
2356 ('nbrevs', 'number of revisions covered'),
2342 ('nbmissingfiles', 'number of missing files at head'),
2357 ('nbmissingfiles', 'number of missing files at head'),
2343 ]
2358 ]
2344 if dotiming:
2359 if dotiming:
2345 entries.append(('nbrenames', 'renamed files'))
2360 entries.append(('nbrenames', 'renamed files'))
2346 entries.append(('time', 'time'))
2361 entries.append(('time', 'time'))
2347 _displaystats(ui, opts, entries, alldata)
2362 _displaystats(ui, opts, entries, alldata)
2348
2363
2349
2364
2350 @command(b'perf::cca|perfcca', formatteropts)
2365 @command(b'perf::cca|perfcca', formatteropts)
2351 def perfcca(ui, repo, **opts):
2366 def perfcca(ui, repo, **opts):
2352 opts = _byteskwargs(opts)
2367 opts = _byteskwargs(opts)
2353 timer, fm = gettimer(ui, opts)
2368 timer, fm = gettimer(ui, opts)
2354 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2369 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2355 fm.end()
2370 fm.end()
2356
2371
2357
2372
2358 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2373 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2359 def perffncacheload(ui, repo, **opts):
2374 def perffncacheload(ui, repo, **opts):
2360 opts = _byteskwargs(opts)
2375 opts = _byteskwargs(opts)
2361 timer, fm = gettimer(ui, opts)
2376 timer, fm = gettimer(ui, opts)
2362 s = repo.store
2377 s = repo.store
2363
2378
2364 def d():
2379 def d():
2365 s.fncache._load()
2380 s.fncache._load()
2366
2381
2367 timer(d)
2382 timer(d)
2368 fm.end()
2383 fm.end()
2369
2384
2370
2385
2371 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2386 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2372 def perffncachewrite(ui, repo, **opts):
2387 def perffncachewrite(ui, repo, **opts):
2373 opts = _byteskwargs(opts)
2388 opts = _byteskwargs(opts)
2374 timer, fm = gettimer(ui, opts)
2389 timer, fm = gettimer(ui, opts)
2375 s = repo.store
2390 s = repo.store
2376 lock = repo.lock()
2391 lock = repo.lock()
2377 s.fncache._load()
2392 s.fncache._load()
2378 tr = repo.transaction(b'perffncachewrite')
2393 tr = repo.transaction(b'perffncachewrite')
2379 tr.addbackup(b'fncache')
2394 tr.addbackup(b'fncache')
2380
2395
2381 def d():
2396 def d():
2382 s.fncache._dirty = True
2397 s.fncache._dirty = True
2383 s.fncache.write(tr)
2398 s.fncache.write(tr)
2384
2399
2385 timer(d)
2400 timer(d)
2386 tr.close()
2401 tr.close()
2387 lock.release()
2402 lock.release()
2388 fm.end()
2403 fm.end()
2389
2404
2390
2405
2391 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2406 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2392 def perffncacheencode(ui, repo, **opts):
2407 def perffncacheencode(ui, repo, **opts):
2393 opts = _byteskwargs(opts)
2408 opts = _byteskwargs(opts)
2394 timer, fm = gettimer(ui, opts)
2409 timer, fm = gettimer(ui, opts)
2395 s = repo.store
2410 s = repo.store
2396 s.fncache._load()
2411 s.fncache._load()
2397
2412
2398 def d():
2413 def d():
2399 for p in s.fncache.entries:
2414 for p in s.fncache.entries:
2400 s.encode(p)
2415 s.encode(p)
2401
2416
2402 timer(d)
2417 timer(d)
2403 fm.end()
2418 fm.end()
2404
2419
2405
2420
2406 def _bdiffworker(q, blocks, xdiff, ready, done):
2421 def _bdiffworker(q, blocks, xdiff, ready, done):
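# worker loop: diff text pairs pulled from the queue; a None sentinel ends the
# current batch, after which the worker blocks on `ready` until the main
# thread queues more work or sets `done` to shut it down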
2407 while not done.is_set():
2422 while not done.is_set():
2408 pair = q.get()
2423 pair = q.get()
2409 while pair is not None:
2424 while pair is not None:
2410 if xdiff:
2425 if xdiff:
2411 mdiff.bdiff.xdiffblocks(*pair)
2426 mdiff.bdiff.xdiffblocks(*pair)
2412 elif blocks:
2427 elif blocks:
2413 mdiff.bdiff.blocks(*pair)
2428 mdiff.bdiff.blocks(*pair)
2414 else:
2429 else:
2415 mdiff.textdiff(*pair)
2430 mdiff.textdiff(*pair)
2416 q.task_done()
2431 q.task_done()
2417 pair = q.get()
2432 pair = q.get()
2418 q.task_done() # for the None one
2433 q.task_done() # for the None one
2419 with ready:
2434 with ready:
2420 ready.wait()
2435 ready.wait()
2421
2436
2422
2437
2423 def _manifestrevision(repo, mnode):
2438 def _manifestrevision(repo, mnode):
2424 ml = repo.manifestlog
2439 ml = repo.manifestlog
2425
2440
2426 if util.safehasattr(ml, b'getstorage'):
2441 if util.safehasattr(ml, b'getstorage'):
2427 store = ml.getstorage(b'')
2442 store = ml.getstorage(b'')
2428 else:
2443 else:
2429 store = ml._revlog
2444 store = ml._revlog
2430
2445
2431 return store.revision(mnode)
2446 return store.revision(mnode)
2432
2447
2433
2448
2434 @command(
2449 @command(
2435 b'perf::bdiff|perfbdiff',
2450 b'perf::bdiff|perfbdiff',
2436 revlogopts
2451 revlogopts
2437 + formatteropts
2452 + formatteropts
2438 + [
2453 + [
2439 (
2454 (
2440 b'',
2455 b'',
2441 b'count',
2456 b'count',
2442 1,
2457 1,
2443 b'number of revisions to test (when using --startrev)',
2458 b'number of revisions to test (when using --startrev)',
2444 ),
2459 ),
2445 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2460 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2446 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
2461 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
2447 (b'', b'blocks', False, b'test computing diffs into blocks'),
2462 (b'', b'blocks', False, b'test computing diffs into blocks'),
2448 (b'', b'xdiff', False, b'use xdiff algorithm'),
2463 (b'', b'xdiff', False, b'use xdiff algorithm'),
2449 ],
2464 ],
2450 b'-c|-m|FILE REV',
2465 b'-c|-m|FILE REV',
2451 )
2466 )
2452 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2467 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2453 """benchmark a bdiff between revisions
2468 """benchmark a bdiff between revisions
2454
2469
2455 By default, benchmark a bdiff between the specified revision and its delta parent.
2470 By default, benchmark a bdiff between the specified revision and its delta parent.
2456
2471
2457 With ``--count``, benchmark bdiffs between delta parents and self for N
2472 With ``--count``, benchmark bdiffs between delta parents and self for N
2458 revisions starting at the specified revision.
2473 revisions starting at the specified revision.
2459
2474
2460 With ``--alldata``, assume the requested revision is a changeset and
2475 With ``--alldata``, assume the requested revision is a changeset and
2461 measure bdiffs for all changes related to that changeset (manifest
2476 measure bdiffs for all changes related to that changeset (manifest
2462 and filelogs).
2477 and filelogs).
2463 """
2478 """
2464 opts = _byteskwargs(opts)
2479 opts = _byteskwargs(opts)
2465
2480
2466 if opts[b'xdiff'] and not opts[b'blocks']:
2481 if opts[b'xdiff'] and not opts[b'blocks']:
2467 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2482 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2468
2483
2469 if opts[b'alldata']:
2484 if opts[b'alldata']:
2470 opts[b'changelog'] = True
2485 opts[b'changelog'] = True
2471
2486
2472 if opts.get(b'changelog') or opts.get(b'manifest'):
2487 if opts.get(b'changelog') or opts.get(b'manifest'):
2473 file_, rev = None, file_
2488 file_, rev = None, file_
2474 elif rev is None:
2489 elif rev is None:
2475 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2490 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2476
2491
2477 blocks = opts[b'blocks']
2492 blocks = opts[b'blocks']
2478 xdiff = opts[b'xdiff']
2493 xdiff = opts[b'xdiff']
2479 textpairs = []
2494 textpairs = []
2480
2495
2481 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2496 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2482
2497
2483 startrev = r.rev(r.lookup(rev))
2498 startrev = r.rev(r.lookup(rev))
2484 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2499 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2485 if opts[b'alldata']:
2500 if opts[b'alldata']:
2486 # Load revisions associated with changeset.
2501 # Load revisions associated with changeset.
2487 ctx = repo[rev]
2502 ctx = repo[rev]
2488 mtext = _manifestrevision(repo, ctx.manifestnode())
2503 mtext = _manifestrevision(repo, ctx.manifestnode())
2489 for pctx in ctx.parents():
2504 for pctx in ctx.parents():
2490 pman = _manifestrevision(repo, pctx.manifestnode())
2505 pman = _manifestrevision(repo, pctx.manifestnode())
2491 textpairs.append((pman, mtext))
2506 textpairs.append((pman, mtext))
2492
2507
2493 # Load filelog revisions by iterating manifest delta.
2508 # Load filelog revisions by iterating manifest delta.
2494 man = ctx.manifest()
2509 man = ctx.manifest()
2495 pman = ctx.p1().manifest()
2510 pman = ctx.p1().manifest()
2496 for filename, change in pman.diff(man).items():
2511 for filename, change in pman.diff(man).items():
2497 fctx = repo.file(filename)
2512 fctx = repo.file(filename)
2498 f1 = fctx.revision(change[0][0] or -1)
2513 f1 = fctx.revision(change[0][0] or -1)
2499 f2 = fctx.revision(change[1][0] or -1)
2514 f2 = fctx.revision(change[1][0] or -1)
2500 textpairs.append((f1, f2))
2515 textpairs.append((f1, f2))
2501 else:
2516 else:
2502 dp = r.deltaparent(rev)
2517 dp = r.deltaparent(rev)
2503 textpairs.append((r.revision(dp), r.revision(rev)))
2518 textpairs.append((r.revision(dp), r.revision(rev)))
2504
2519
2505 withthreads = threads > 0
2520 withthreads = threads > 0
2506 if not withthreads:
2521 if not withthreads:
2507
2522
2508 def d():
2523 def d():
2509 for pair in textpairs:
2524 for pair in textpairs:
2510 if xdiff:
2525 if xdiff:
2511 mdiff.bdiff.xdiffblocks(*pair)
2526 mdiff.bdiff.xdiffblocks(*pair)
2512 elif blocks:
2527 elif blocks:
2513 mdiff.bdiff.blocks(*pair)
2528 mdiff.bdiff.blocks(*pair)
2514 else:
2529 else:
2515 mdiff.textdiff(*pair)
2530 mdiff.textdiff(*pair)
2516
2531
2517 else:
2532 else:
2518 q = queue()
2533 q = queue()
2519 for i in _xrange(threads):
2534 for i in _xrange(threads):
2520 q.put(None)
2535 q.put(None)
2521 ready = threading.Condition()
2536 ready = threading.Condition()
2522 done = threading.Event()
2537 done = threading.Event()
2523 for i in _xrange(threads):
2538 for i in _xrange(threads):
2524 threading.Thread(
2539 threading.Thread(
2525 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2540 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2526 ).start()
2541 ).start()
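# the priming None sentinels queued above are consumed here, so every worker
# is parked on `ready` before the first timed run starts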
2527 q.join()
2542 q.join()
2528
2543
2529 def d():
2544 def d():
2530 for pair in textpairs:
2545 for pair in textpairs:
2531 q.put(pair)
2546 q.put(pair)
2532 for i in _xrange(threads):
2547 for i in _xrange(threads):
2533 q.put(None)
2548 q.put(None)
2534 with ready:
2549 with ready:
2535 ready.notify_all()
2550 ready.notify_all()
2536 q.join()
2551 q.join()
2537
2552
2538 timer, fm = gettimer(ui, opts)
2553 timer, fm = gettimer(ui, opts)
2539 timer(d)
2554 timer(d)
2540 fm.end()
2555 fm.end()
2541
2556
2542 if withthreads:
2557 if withthreads:
2543 done.set()
2558 done.set()
2544 for i in _xrange(threads):
2559 for i in _xrange(threads):
2545 q.put(None)
2560 q.put(None)
2546 with ready:
2561 with ready:
2547 ready.notify_all()
2562 ready.notify_all()
2548
2563
2549
2564
2550 @command(
2565 @command(
2551 b'perf::unidiff|perfunidiff',
2566 b'perf::unidiff|perfunidiff',
2552 revlogopts
2567 revlogopts
2553 + formatteropts
2568 + formatteropts
2554 + [
2569 + [
2555 (
2570 (
2556 b'',
2571 b'',
2557 b'count',
2572 b'count',
2558 1,
2573 1,
2559 b'number of revisions to test (when using --startrev)',
2574 b'number of revisions to test (when using --startrev)',
2560 ),
2575 ),
2561 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2576 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2562 ],
2577 ],
2563 b'-c|-m|FILE REV',
2578 b'-c|-m|FILE REV',
2564 )
2579 )
2565 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2580 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2566 """benchmark a unified diff between revisions
2581 """benchmark a unified diff between revisions
2567
2582
2568 This doesn't include any copy tracing - it's just a unified diff
2583 This doesn't include any copy tracing - it's just a unified diff
2569 of the texts.
2584 of the texts.
2570
2585
2571 By default, benchmark a diff between the specified revision and its delta parent.
2586 By default, benchmark a diff between the specified revision and its delta parent.
2572
2587
2573 With ``--count``, benchmark diffs between delta parents and self for N
2588 With ``--count``, benchmark diffs between delta parents and self for N
2574 revisions starting at the specified revision.
2589 revisions starting at the specified revision.
2575
2590
2576 With ``--alldata``, assume the requested revision is a changeset and
2591 With ``--alldata``, assume the requested revision is a changeset and
2577 measure diffs for all changes related to that changeset (manifest
2592 measure diffs for all changes related to that changeset (manifest
2578 and filelogs).
2593 and filelogs).
2579 """
2594 """
2580 opts = _byteskwargs(opts)
2595 opts = _byteskwargs(opts)
2581 if opts[b'alldata']:
2596 if opts[b'alldata']:
2582 opts[b'changelog'] = True
2597 opts[b'changelog'] = True
2583
2598
2584 if opts.get(b'changelog') or opts.get(b'manifest'):
2599 if opts.get(b'changelog') or opts.get(b'manifest'):
2585 file_, rev = None, file_
2600 file_, rev = None, file_
2586 elif rev is None:
2601 elif rev is None:
2587 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2602 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2588
2603
2589 textpairs = []
2604 textpairs = []
2590
2605
2591 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2606 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2592
2607
2593 startrev = r.rev(r.lookup(rev))
2608 startrev = r.rev(r.lookup(rev))
2594 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2609 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2595 if opts[b'alldata']:
2610 if opts[b'alldata']:
2596 # Load revisions associated with changeset.
2611 # Load revisions associated with changeset.
2597 ctx = repo[rev]
2612 ctx = repo[rev]
2598 mtext = _manifestrevision(repo, ctx.manifestnode())
2613 mtext = _manifestrevision(repo, ctx.manifestnode())
2599 for pctx in ctx.parents():
2614 for pctx in ctx.parents():
2600 pman = _manifestrevision(repo, pctx.manifestnode())
2615 pman = _manifestrevision(repo, pctx.manifestnode())
2601 textpairs.append((pman, mtext))
2616 textpairs.append((pman, mtext))
2602
2617
2603 # Load filelog revisions by iterating manifest delta.
2618 # Load filelog revisions by iterating manifest delta.
2604 man = ctx.manifest()
2619 man = ctx.manifest()
2605 pman = ctx.p1().manifest()
2620 pman = ctx.p1().manifest()
2606 for filename, change in pman.diff(man).items():
2621 for filename, change in pman.diff(man).items():
2607 fctx = repo.file(filename)
2622 fctx = repo.file(filename)
2608 f1 = fctx.revision(change[0][0] or -1)
2623 f1 = fctx.revision(change[0][0] or -1)
2609 f2 = fctx.revision(change[1][0] or -1)
2624 f2 = fctx.revision(change[1][0] or -1)
2610 textpairs.append((f1, f2))
2625 textpairs.append((f1, f2))
2611 else:
2626 else:
2612 dp = r.deltaparent(rev)
2627 dp = r.deltaparent(rev)
2613 textpairs.append((r.revision(dp), r.revision(rev)))
2628 textpairs.append((r.revision(dp), r.revision(rev)))
2614
2629
2615 def d():
2630 def d():
2616 for left, right in textpairs:
2631 for left, right in textpairs:
2617 # The date strings don't matter, so we pass empty strings.
2632 # The date strings don't matter, so we pass empty strings.
2618 headerlines, hunks = mdiff.unidiff(
2633 headerlines, hunks = mdiff.unidiff(
2619 left, b'', right, b'', b'left', b'right', binary=False
2634 left, b'', right, b'', b'left', b'right', binary=False
2620 )
2635 )
2621 # consume iterators in roughly the way patch.py does
2636 # consume iterators in roughly the way patch.py does
2622 b'\n'.join(headerlines)
2637 b'\n'.join(headerlines)
2623 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2638 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2624
2639
2625 timer, fm = gettimer(ui, opts)
2640 timer, fm = gettimer(ui, opts)
2626 timer(d)
2641 timer(d)
2627 fm.end()
2642 fm.end()
2628
2643
2629
2644
2630 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2645 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2631 def perfdiffwd(ui, repo, **opts):
2646 def perfdiffwd(ui, repo, **opts):
2632 """Profile diff of working directory changes"""
2647 """Profile diff of working directory changes"""
2633 opts = _byteskwargs(opts)
2648 opts = _byteskwargs(opts)
2634 timer, fm = gettimer(ui, opts)
2649 timer, fm = gettimer(ui, opts)
2635 options = {
2650 options = {
2636 'w': 'ignore_all_space',
2651 'w': 'ignore_all_space',
2637 'b': 'ignore_space_change',
2652 'b': 'ignore_space_change',
2638 'B': 'ignore_blank_lines',
2653 'B': 'ignore_blank_lines',
2639 }
2654 }
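# time `hg diff` of the working directory once per whitespace-related
# combination of diffopts ('', -w, -b, -B, -wB)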
2640
2655
2641 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2656 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2642 opts = {options[c]: b'1' for c in diffopt}
2657 opts = {options[c]: b'1' for c in diffopt}
2643
2658
2644 def d():
2659 def d():
2645 ui.pushbuffer()
2660 ui.pushbuffer()
2646 commands.diff(ui, repo, **opts)
2661 commands.diff(ui, repo, **opts)
2647 ui.popbuffer()
2662 ui.popbuffer()
2648
2663
2649 diffopt = diffopt.encode('ascii')
2664 diffopt = diffopt.encode('ascii')
2650 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2665 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2651 timer(d, title=title)
2666 timer(d, title=title)
2652 fm.end()
2667 fm.end()
2653
2668
2654
2669
2655 @command(
2670 @command(
2656 b'perf::revlogindex|perfrevlogindex',
2671 b'perf::revlogindex|perfrevlogindex',
2657 revlogopts + formatteropts,
2672 revlogopts + formatteropts,
2658 b'-c|-m|FILE',
2673 b'-c|-m|FILE',
2659 )
2674 )
2660 def perfrevlogindex(ui, repo, file_=None, **opts):
2675 def perfrevlogindex(ui, repo, file_=None, **opts):
2661 """Benchmark operations against a revlog index.
2676 """Benchmark operations against a revlog index.
2662
2677
2663 This tests constructing a revlog instance, reading index data,
2678 This tests constructing a revlog instance, reading index data,
2664 parsing index data, and performing various operations related to
2679 parsing index data, and performing various operations related to
2665 index data.
2680 index data.
2666 """
2681 """
2667
2682
2668 opts = _byteskwargs(opts)
2683 opts = _byteskwargs(opts)
2669
2684
2670 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2685 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2671
2686
2672 opener = getattr(rl, 'opener') # trick linter
2687 opener = getattr(rl, 'opener') # trick linter
2673 # compat with hg <= 5.8
2688 # compat with hg <= 5.8
2674 radix = getattr(rl, 'radix', None)
2689 radix = getattr(rl, 'radix', None)
2675 indexfile = getattr(rl, '_indexfile', None)
2690 indexfile = getattr(rl, '_indexfile', None)
2676 if indexfile is None:
2691 if indexfile is None:
2677 # compatibility with <= hg-5.8
2692 # compatibility with <= hg-5.8
2678 indexfile = getattr(rl, 'indexfile')
2693 indexfile = getattr(rl, 'indexfile')
2679 data = opener.read(indexfile)
2694 data = opener.read(indexfile)
2680
2695
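# a version-1 revlog index starts with a 4-byte big-endian header: the low 16
# bits hold the format version, the high bits hold flags such as inline data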
2681 header = struct.unpack(b'>I', data[0:4])[0]
2696 header = struct.unpack(b'>I', data[0:4])[0]
2682 version = header & 0xFFFF
2697 version = header & 0xFFFF
2683 if version == 1:
2698 if version == 1:
2684 inline = header & (1 << 16)
2699 inline = header & (1 << 16)
2685 else:
2700 else:
2686 raise error.Abort(b'unsupported revlog version: %d' % version)
2701 raise error.Abort(b'unsupported revlog version: %d' % version)
2687
2702
2688 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2703 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2689 if parse_index_v1 is None:
2704 if parse_index_v1 is None:
2690 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2705 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2691
2706
2692 rllen = len(rl)
2707 rllen = len(rl)
2693
2708
2694 node0 = rl.node(0)
2709 node0 = rl.node(0)
2695 node25 = rl.node(rllen // 4)
2710 node25 = rl.node(rllen // 4)
2696 node50 = rl.node(rllen // 2)
2711 node50 = rl.node(rllen // 2)
2697 node75 = rl.node(rllen // 4 * 3)
2712 node75 = rl.node(rllen // 4 * 3)
2698 node100 = rl.node(rllen - 1)
2713 node100 = rl.node(rllen - 1)
2699
2714
2700 allrevs = range(rllen)
2715 allrevs = range(rllen)
2701 allrevsrev = list(reversed(allrevs))
2716 allrevsrev = list(reversed(allrevs))
2702 allnodes = [rl.node(rev) for rev in range(rllen)]
2717 allnodes = [rl.node(rev) for rev in range(rllen)]
2703 allnodesrev = list(reversed(allnodes))
2718 allnodesrev = list(reversed(allnodes))
2704
2719
2705 def constructor():
2720 def constructor():
2706 if radix is not None:
2721 if radix is not None:
2707 revlog(opener, radix=radix)
2722 revlog(opener, radix=radix)
2708 else:
2723 else:
2709 # hg <= 5.8
2724 # hg <= 5.8
2710 revlog(opener, indexfile=indexfile)
2725 revlog(opener, indexfile=indexfile)
2711
2726
2712 def read():
2727 def read():
2713 with opener(indexfile) as fh:
2728 with opener(indexfile) as fh:
2714 fh.read()
2729 fh.read()
2715
2730
2716 def parseindex():
2731 def parseindex():
2717 parse_index_v1(data, inline)
2732 parse_index_v1(data, inline)
2718
2733
2719 def getentry(revornode):
2734 def getentry(revornode):
2720 index = parse_index_v1(data, inline)[0]
2735 index = parse_index_v1(data, inline)[0]
2721 index[revornode]
2736 index[revornode]
2722
2737
2723 def getentries(revs, count=1):
2738 def getentries(revs, count=1):
2724 index = parse_index_v1(data, inline)[0]
2739 index = parse_index_v1(data, inline)[0]
2725
2740
2726 for i in range(count):
2741 for i in range(count):
2727 for rev in revs:
2742 for rev in revs:
2728 index[rev]
2743 index[rev]
2729
2744
2730 def resolvenode(node):
2745 def resolvenode(node):
2731 index = parse_index_v1(data, inline)[0]
2746 index = parse_index_v1(data, inline)[0]
2732 rev = getattr(index, 'rev', None)
2747 rev = getattr(index, 'rev', None)
2733 if rev is None:
2748 if rev is None:
2734 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2749 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2735 # This only works for the C code.
2750 # This only works for the C code.
2736 if nodemap is None:
2751 if nodemap is None:
2737 return
2752 return
2738 rev = nodemap.__getitem__
2753 rev = nodemap.__getitem__
2739
2754
2740 try:
2755 try:
2741 rev(node)
2756 rev(node)
2742 except error.RevlogError:
2757 except error.RevlogError:
2743 pass
2758 pass
2744
2759
2745 def resolvenodes(nodes, count=1):
2760 def resolvenodes(nodes, count=1):
2746 index = parse_index_v1(data, inline)[0]
2761 index = parse_index_v1(data, inline)[0]
2747 rev = getattr(index, 'rev', None)
2762 rev = getattr(index, 'rev', None)
2748 if rev is None:
2763 if rev is None:
2749 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2764 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2750 # This only works for the C code.
2765 # This only works for the C code.
2751 if nodemap is None:
2766 if nodemap is None:
2752 return
2767 return
2753 rev = nodemap.__getitem__
2768 rev = nodemap.__getitem__
2754
2769
2755 for i in range(count):
2770 for i in range(count):
2756 for node in nodes:
2771 for node in nodes:
2757 try:
2772 try:
2758 rev(node)
2773 rev(node)
2759 except error.RevlogError:
2774 except error.RevlogError:
2760 pass
2775 pass
2761
2776
2762 benches = [
2777 benches = [
2763 (constructor, b'revlog constructor'),
2778 (constructor, b'revlog constructor'),
2764 (read, b'read'),
2779 (read, b'read'),
2765 (parseindex, b'create index object'),
2780 (parseindex, b'create index object'),
2766 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2781 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2767 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2782 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2768 (lambda: resolvenode(node0), b'look up node at rev 0'),
2783 (lambda: resolvenode(node0), b'look up node at rev 0'),
2769 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2784 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2770 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2785 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2771 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2786 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2772 (lambda: resolvenode(node100), b'look up node at tip'),
2787 (lambda: resolvenode(node100), b'look up node at tip'),
2773 # 2x variation is to measure caching impact.
2788 # 2x variation is to measure caching impact.
2774 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2789 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2775 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2790 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2776 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2791 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2777 (
2792 (
2778 lambda: resolvenodes(allnodesrev, 2),
2793 lambda: resolvenodes(allnodesrev, 2),
2779 b'look up all nodes 2x (reverse)',
2794 b'look up all nodes 2x (reverse)',
2780 ),
2795 ),
2781 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2796 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2782 (
2797 (
2783 lambda: getentries(allrevs, 2),
2798 lambda: getentries(allrevs, 2),
2784 b'retrieve all index entries 2x (forward)',
2799 b'retrieve all index entries 2x (forward)',
2785 ),
2800 ),
2786 (
2801 (
2787 lambda: getentries(allrevsrev),
2802 lambda: getentries(allrevsrev),
2788 b'retrieve all index entries (reverse)',
2803 b'retrieve all index entries (reverse)',
2789 ),
2804 ),
2790 (
2805 (
2791 lambda: getentries(allrevsrev, 2),
2806 lambda: getentries(allrevsrev, 2),
2792 b'retrieve all index entries 2x (reverse)',
2807 b'retrieve all index entries 2x (reverse)',
2793 ),
2808 ),
2794 ]
2809 ]
2795
2810
2796 for fn, title in benches:
2811 for fn, title in benches:
2797 timer, fm = gettimer(ui, opts)
2812 timer, fm = gettimer(ui, opts)
2798 timer(fn, title=title)
2813 timer(fn, title=title)
2799 fm.end()
2814 fm.end()
2800
2815
2801
2816
2802 @command(
2817 @command(
2803 b'perf::revlogrevisions|perfrevlogrevisions',
2818 b'perf::revlogrevisions|perfrevlogrevisions',
2804 revlogopts
2819 revlogopts
2805 + formatteropts
2820 + formatteropts
2806 + [
2821 + [
2807 (b'd', b'dist', 100, b'distance between the revisions'),
2822 (b'd', b'dist', 100, b'distance between the revisions'),
2808 (b's', b'startrev', 0, b'revision to start reading at'),
2823 (b's', b'startrev', 0, b'revision to start reading at'),
2809 (b'', b'reverse', False, b'read in reverse'),
2824 (b'', b'reverse', False, b'read in reverse'),
2810 ],
2825 ],
2811 b'-c|-m|FILE',
2826 b'-c|-m|FILE',
2812 )
2827 )
2813 def perfrevlogrevisions(
2828 def perfrevlogrevisions(
2814 ui, repo, file_=None, startrev=0, reverse=False, **opts
2829 ui, repo, file_=None, startrev=0, reverse=False, **opts
2815 ):
2830 ):
2816 """Benchmark reading a series of revisions from a revlog.
2831 """Benchmark reading a series of revisions from a revlog.
2817
2832
2818 By default, we read every ``-d/--dist`` revision from 0 to tip of
2833 By default, we read every ``-d/--dist`` revision from 0 to tip of
2819 the specified revlog.
2834 the specified revlog.
2820
2835
2821 The start revision can be defined via ``-s/--startrev``.
2836 The start revision can be defined via ``-s/--startrev``.
2822 """
2837 """
2823 opts = _byteskwargs(opts)
2838 opts = _byteskwargs(opts)
2824
2839
2825 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2840 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2826 rllen = getlen(ui)(rl)
2841 rllen = getlen(ui)(rl)
2827
2842
2828 if startrev < 0:
2843 if startrev < 0:
2829 startrev = rllen + startrev
2844 startrev = rllen + startrev
2830
2845
2831 def d():
2846 def d():
2832 rl.clearcaches()
2847 rl.clearcaches()
2833
2848
2834 beginrev = startrev
2849 beginrev = startrev
2835 endrev = rllen
2850 endrev = rllen
2836 dist = opts[b'dist']
2851 dist = opts[b'dist']
2837
2852
2838 if reverse:
2853 if reverse:
2839 beginrev, endrev = endrev - 1, beginrev - 1
2854 beginrev, endrev = endrev - 1, beginrev - 1
2840 dist = -1 * dist
2855 dist = -1 * dist
2841
2856
2842 for x in _xrange(beginrev, endrev, dist):
2857 for x in _xrange(beginrev, endrev, dist):
2843 # Old revisions don't support passing int.
2858 # Old revisions don't support passing int.
2844 n = rl.node(x)
2859 n = rl.node(x)
2845 rl.revision(n)
2860 rl.revision(n)
2846
2861
2847 timer, fm = gettimer(ui, opts)
2862 timer, fm = gettimer(ui, opts)
2848 timer(d)
2863 timer(d)
2849 fm.end()
2864 fm.end()
2850
2865
2851
2866
2852 @command(
2867 @command(
2853 b'perf::revlogwrite|perfrevlogwrite',
2868 b'perf::revlogwrite|perfrevlogwrite',
2854 revlogopts
2869 revlogopts
2855 + formatteropts
2870 + formatteropts
2856 + [
2871 + [
2857 (b's', b'startrev', 1000, b'revision to start writing at'),
2872 (b's', b'startrev', 1000, b'revision to start writing at'),
2858 (b'', b'stoprev', -1, b'last revision to write'),
2873 (b'', b'stoprev', -1, b'last revision to write'),
2859 (b'', b'count', 3, b'number of passes to perform'),
2874 (b'', b'count', 3, b'number of passes to perform'),
2860 (b'', b'details', False, b'print timing for every revisions tested'),
2875 (b'', b'details', False, b'print timing for every revisions tested'),
2861 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2876 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2862 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2877 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2863 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2878 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2864 ],
2879 ],
2865 b'-c|-m|FILE',
2880 b'-c|-m|FILE',
2866 )
2881 )
2867 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2882 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2868 """Benchmark writing a series of revisions to a revlog.
2883 """Benchmark writing a series of revisions to a revlog.
2869
2884
2870 Possible source values are:
2885 Possible source values are:
2871 * `full`: add from a full text (default).
2886 * `full`: add from a full text (default).
2872 * `parent-1`: add from a delta to the first parent
2887 * `parent-1`: add from a delta to the first parent
2873 * `parent-2`: add from a delta to the second parent if it exists
2888 * `parent-2`: add from a delta to the second parent if it exists
2874 (use a delta from the first parent otherwise)
2889 (use a delta from the first parent otherwise)
2875 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2890 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2876 * `storage`: add from the existing precomputed deltas
2891 * `storage`: add from the existing precomputed deltas
2877
2892
2878 Note: This performance command measures performance in a custom way. As a
2893 Note: This performance command measures performance in a custom way. As a
2879 result, some of the global configuration of the 'perf' command does not
2894 result, some of the global configuration of the 'perf' command does not
2880 apply to it:
2895 apply to it:
2881
2896
2882 * ``pre-run``: disabled
2897 * ``pre-run``: disabled
2883
2898
2884 * ``profile-benchmark``: disabled
2899 * ``profile-benchmark``: disabled
2885
2900
2886 * ``run-limits``: disabled, use --count instead
2901 * ``run-limits``: disabled, use --count instead
2887 """
2902 """
2888 opts = _byteskwargs(opts)
2903 opts = _byteskwargs(opts)
2889
2904
2890 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2905 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2891 rllen = getlen(ui)(rl)
2906 rllen = getlen(ui)(rl)
2892 if startrev < 0:
2907 if startrev < 0:
2893 startrev = rllen + startrev
2908 startrev = rllen + startrev
2894 if stoprev < 0:
2909 if stoprev < 0:
2895 stoprev = rllen + stoprev
2910 stoprev = rllen + stoprev
2896
2911
2897 lazydeltabase = opts['lazydeltabase']
2912 lazydeltabase = opts['lazydeltabase']
2898 source = opts['source']
2913 source = opts['source']
2899 clearcaches = opts['clear_caches']
2914 clearcaches = opts['clear_caches']
2900 validsource = (
2915 validsource = (
2901 b'full',
2916 b'full',
2902 b'parent-1',
2917 b'parent-1',
2903 b'parent-2',
2918 b'parent-2',
2904 b'parent-smallest',
2919 b'parent-smallest',
2905 b'storage',
2920 b'storage',
2906 )
2921 )
2907 if source not in validsource:
2922 if source not in validsource:
2908 raise error.Abort('invalid source type: %s' % source)
2923 raise error.Abort('invalid source type: %s' % source)
2909
2924
2910 ### actually gather results
2925 ### actually gather results
2911 count = opts['count']
2926 count = opts['count']
2912 if count <= 0:
2927 if count <= 0:
2913 raise error.Abort('invalid run count: %d' % count)
2928 raise error.Abort('invalid run count: %d' % count)
2914 allresults = []
2929 allresults = []
2915 for c in range(count):
2930 for c in range(count):
2916 timing = _timeonewrite(
2931 timing = _timeonewrite(
2917 ui,
2932 ui,
2918 rl,
2933 rl,
2919 source,
2934 source,
2920 startrev,
2935 startrev,
2921 stoprev,
2936 stoprev,
2922 c + 1,
2937 c + 1,
2923 lazydeltabase=lazydeltabase,
2938 lazydeltabase=lazydeltabase,
2924 clearcaches=clearcaches,
2939 clearcaches=clearcaches,
2925 )
2940 )
2926 allresults.append(timing)
2941 allresults.append(timing)
2927
2942
2928 ### consolidate the results in a single list
2943 ### consolidate the results in a single list
2929 results = []
2944 results = []
2930 for idx, (rev, t) in enumerate(allresults[0]):
2945 for idx, (rev, t) in enumerate(allresults[0]):
2931 ts = [t]
2946 ts = [t]
2932 for other in allresults[1:]:
2947 for other in allresults[1:]:
2933 orev, ot = other[idx]
2948 orev, ot = other[idx]
2934 assert orev == rev
2949 assert orev == rev
2935 ts.append(ot)
2950 ts.append(ot)
2936 results.append((rev, ts))
2951 results.append((rev, ts))
2937 resultcount = len(results)
2952 resultcount = len(results)
2938
2953
2939 ### Compute and display relevant statistics
2954 ### Compute and display relevant statistics
2940
2955
2941 # get a formatter
2956 # get a formatter
2942 fm = ui.formatter(b'perf', opts)
2957 fm = ui.formatter(b'perf', opts)
2943 displayall = ui.configbool(b"perf", b"all-timing", False)
2958 displayall = ui.configbool(b"perf", b"all-timing", False)
2944
2959
2945 # print individual details if requested
2960 # print individual details if requested
2946 if opts['details']:
2961 if opts['details']:
2947 for idx, item in enumerate(results, 1):
2962 for idx, item in enumerate(results, 1):
2948 rev, data = item
2963 rev, data = item
2949 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2964 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2950 formatone(fm, data, title=title, displayall=displayall)
2965 formatone(fm, data, title=title, displayall=displayall)
2951
2966
2952 # sorts results by median time
2967 # sorts results by median time
2953 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2968 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2954 # list of (name, index) to display
2969 # list of (name, index) to display
2955 relevants = [
2970 relevants = [
2956 ("min", 0),
2971 ("min", 0),
2957 ("10%", resultcount * 10 // 100),
2972 ("10%", resultcount * 10 // 100),
2958 ("25%", resultcount * 25 // 100),
2973 ("25%", resultcount * 25 // 100),
2959 ("50%", resultcount * 70 // 100),
2974 ("50%", resultcount * 70 // 100),
2960 ("75%", resultcount * 75 // 100),
2975 ("75%", resultcount * 75 // 100),
2961 ("90%", resultcount * 90 // 100),
2976 ("90%", resultcount * 90 // 100),
2962 ("95%", resultcount * 95 // 100),
2977 ("95%", resultcount * 95 // 100),
2963 ("99%", resultcount * 99 // 100),
2978 ("99%", resultcount * 99 // 100),
2964 ("99.9%", resultcount * 999 // 1000),
2979 ("99.9%", resultcount * 999 // 1000),
2965 ("99.99%", resultcount * 9999 // 10000),
2980 ("99.99%", resultcount * 9999 // 10000),
2966 ("99.999%", resultcount * 99999 // 100000),
2981 ("99.999%", resultcount * 99999 // 100000),
2967 ("max", -1),
2982 ("max", -1),
2968 ]
2983 ]
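# the indices above point into `results`, which was just sorted by median
# time, so each entry approximates a percentile of per-revision write time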
2969 if not ui.quiet:
2984 if not ui.quiet:
2970 for name, idx in relevants:
2985 for name, idx in relevants:
2971 data = results[idx]
2986 data = results[idx]
2972 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2987 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2973 formatone(fm, data[1], title=title, displayall=displayall)
2988 formatone(fm, data[1], title=title, displayall=displayall)
2974
2989
2975 # XXX summing that many floats will not be very precise; we ignore this fact
2990 # XXX summing that many floats will not be very precise; we ignore this fact
2976 # for now
2991 # for now
2977 totaltime = []
2992 totaltime = []
2978 for item in allresults:
2993 for item in allresults:
2979 totaltime.append(
2994 totaltime.append(
2980 (
2995 (
2981 sum(x[1][0] for x in item),
2996 sum(x[1][0] for x in item),
2982 sum(x[1][1] for x in item),
2997 sum(x[1][1] for x in item),
2983 sum(x[1][2] for x in item),
2998 sum(x[1][2] for x in item),
2984 )
2999 )
2985 )
3000 )
2986 formatone(
3001 formatone(
2987 fm,
3002 fm,
2988 totaltime,
3003 totaltime,
2989 title="total time (%d revs)" % resultcount,
3004 title="total time (%d revs)" % resultcount,
2990 displayall=displayall,
3005 displayall=displayall,
2991 )
3006 )
2992 fm.end()
3007 fm.end()
2993
3008
2994
3009
2995 class _faketr:
3010 class _faketr:
2996 def add(s, x, y, z=None):
3011 def add(s, x, y, z=None):
2997 return None
3012 return None
2998
3013
2999
3014
3000 def _timeonewrite(
3015 def _timeonewrite(
3001 ui,
3016 ui,
3002 orig,
3017 orig,
3003 source,
3018 source,
3004 startrev,
3019 startrev,
3005 stoprev,
3020 stoprev,
3006 runidx=None,
3021 runidx=None,
3007 lazydeltabase=True,
3022 lazydeltabase=True,
3008 clearcaches=True,
3023 clearcaches=True,
3009 ):
3024 ):
3010 timings = []
3025 timings = []
3011 tr = _faketr()
3026 tr = _faketr()
3012 with _temprevlog(ui, orig, startrev) as dest:
3027 with _temprevlog(ui, orig, startrev) as dest:
3013 dest._lazydeltabase = lazydeltabase
3028 dest._lazydeltabase = lazydeltabase
3014 revs = list(orig.revs(startrev, stoprev))
3029 revs = list(orig.revs(startrev, stoprev))
3015 total = len(revs)
3030 total = len(revs)
3016 topic = 'adding'
3031 topic = 'adding'
3017 if runidx is not None:
3032 if runidx is not None:
3018 topic += ' (run #%d)' % runidx
3033 topic += ' (run #%d)' % runidx
3019 # Support both old and new progress API
3034 # Support both old and new progress API
3020 if util.safehasattr(ui, 'makeprogress'):
3035 if util.safehasattr(ui, 'makeprogress'):
3021 progress = ui.makeprogress(topic, unit='revs', total=total)
3036 progress = ui.makeprogress(topic, unit='revs', total=total)
3022
3037
3023 def updateprogress(pos):
3038 def updateprogress(pos):
3024 progress.update(pos)
3039 progress.update(pos)
3025
3040
3026 def completeprogress():
3041 def completeprogress():
3027 progress.complete()
3042 progress.complete()
3028
3043
3029 else:
3044 else:
3030
3045
3031 def updateprogress(pos):
3046 def updateprogress(pos):
3032 ui.progress(topic, pos, unit='revs', total=total)
3047 ui.progress(topic, pos, unit='revs', total=total)
3033
3048
3034 def completeprogress():
3049 def completeprogress():
3035 ui.progress(topic, None, unit='revs', total=total)
3050 ui.progress(topic, None, unit='revs', total=total)
3036
3051
3037 for idx, rev in enumerate(revs):
3052 for idx, rev in enumerate(revs):
3038 updateprogress(idx)
3053 updateprogress(idx)
3039 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3054 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3040 if clearcaches:
3055 if clearcaches:
3041 dest.index.clearcaches()
3056 dest.index.clearcaches()
3042 dest.clearcaches()
3057 dest.clearcaches()
3043 with timeone() as r:
3058 with timeone() as r:
3044 dest.addrawrevision(*addargs, **addkwargs)
3059 dest.addrawrevision(*addargs, **addkwargs)
3045 timings.append((rev, r[0]))
3060 timings.append((rev, r[0]))
3046 updateprogress(total)
3061 updateprogress(total)
3047 completeprogress()
3062 completeprogress()
3048 return timings
3063 return timings
3049
3064
3050
3065
3051 def _getrevisionseed(orig, rev, tr, source):
3066 def _getrevisionseed(orig, rev, tr, source):
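# build the positional and keyword arguments for `addrawrevision`, either from
# the full text or from a cached delta against the base selected by `source`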
3052 from mercurial.node import nullid
3067 from mercurial.node import nullid
3053
3068
3054 linkrev = orig.linkrev(rev)
3069 linkrev = orig.linkrev(rev)
3055 node = orig.node(rev)
3070 node = orig.node(rev)
3056 p1, p2 = orig.parents(node)
3071 p1, p2 = orig.parents(node)
3057 flags = orig.flags(rev)
3072 flags = orig.flags(rev)
3058 cachedelta = None
3073 cachedelta = None
3059 text = None
3074 text = None
3060
3075
3061 if source == b'full':
3076 if source == b'full':
3062 text = orig.revision(rev)
3077 text = orig.revision(rev)
3063 elif source == b'parent-1':
3078 elif source == b'parent-1':
3064 baserev = orig.rev(p1)
3079 baserev = orig.rev(p1)
3065 cachedelta = (baserev, orig.revdiff(p1, rev))
3080 cachedelta = (baserev, orig.revdiff(p1, rev))
3066 elif source == b'parent-2':
3081 elif source == b'parent-2':
3067 parent = p2
3082 parent = p2
3068 if p2 == nullid:
3083 if p2 == nullid:
3069 parent = p1
3084 parent = p1
3070 baserev = orig.rev(parent)
3085 baserev = orig.rev(parent)
3071 cachedelta = (baserev, orig.revdiff(parent, rev))
3086 cachedelta = (baserev, orig.revdiff(parent, rev))
3072 elif source == b'parent-smallest':
3087 elif source == b'parent-smallest':
3073 p1diff = orig.revdiff(p1, rev)
3088 p1diff = orig.revdiff(p1, rev)
3074 parent = p1
3089 parent = p1
3075 diff = p1diff
3090 diff = p1diff
3076 if p2 != nullid:
3091 if p2 != nullid:
3077 p2diff = orig.revdiff(p2, rev)
3092 p2diff = orig.revdiff(p2, rev)
3078 if len(p1diff) > len(p2diff):
3093 if len(p1diff) > len(p2diff):
3079 parent = p2
3094 parent = p2
3080 diff = p2diff
3095 diff = p2diff
3081 baserev = orig.rev(parent)
3096 baserev = orig.rev(parent)
3082 cachedelta = (baserev, diff)
3097 cachedelta = (baserev, diff)
3083 elif source == b'storage':
3098 elif source == b'storage':
3084 baserev = orig.deltaparent(rev)
3099 baserev = orig.deltaparent(rev)
3085 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3100 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3086
3101
3087 return (
3102 return (
3088 (text, tr, linkrev, p1, p2),
3103 (text, tr, linkrev, p1, p2),
3089 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3104 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3090 )
3105 )


@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)


@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
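
# Illustrative invocations of perf::revlogchunks (the flags mirror the command
# definition above; adjust the engine list to whatever your build provides):
#
#   $ hg perfrevlogchunks -m
#   $ hg perfrevlogchunks -c --engines 'zlib,zstd'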


@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
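
# Illustrative invocation: time each phase for a single changelog revision
# (an example revision number; substitute one that exists in your repository):
#
#   $ hg perfrevlogrevision -c 1000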


@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on the revset execution. The volatile caches
    hold filtering- and obsolescence-related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
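
# Illustrative invocations (any revset expression works; these are examples):
#
#   $ hg perfrevset 'all()'
#   $ hg perfrevset 'draft() and head()' --contexts --clear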


@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
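
# Illustrative invocations: benchmark every volatile set, or only the named
# ones (names come from obsolete.cachefuncs and repoview.filtertable):
#
#   $ hg perfvolatilesets
#   $ hg perfvolatilesets visible --clear-obsstore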


@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
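
# Illustrative invocations: benchmark every filter level, or only the named
# ones (filter names come from repoview.filtertable, e.g. 'visible', 'served'):
#
#   $ hg perfbranchmap
#   $ hg perfbranchmap served --full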


@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each run'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)


@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
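
# Illustrative invocations: list the on-disk branchmap caches, then time
# loading one of them (filter names follow repoview.filtertable):
#
#   $ hg perfbranchmapload --list
#   $ hg perfbranchmapload -f served --clear-revlogs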


@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(repo, svfs)))
    fm.end()


@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
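
# Illustrative invocations (norepo command, so it runs outside a repository):
#
#   $ hg perflrucachedict --size 4 --gets 10000 --sets 10000
#   $ hg perflrucachedict --size 1000 --costlimit 500 --mixed 10000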


@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        line = item * nitems + b'\n'

    def benchmark():
        for i in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for i in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
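
# Illustrative invocations: compare per-item writes against batched lines
# (the write method name must be an existing ui method, e.g. 'write'):
#
#   $ hg perfwrite --nlines 1000 --nitems 50
#   $ hg perfwrite --nlines 1000 --nitems 50 --batch-line --flush-line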


def uisetup(ui):
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)


@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
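
# Illustrative invocation (norepo command; lower --total for a quicker run):
#
#   $ hg perfprogress --topic 'progress' --total 100000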