perf-bundle: add a new command to benchmark bundle creation time
marmoute
r50306:b081a5aa default
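An illustrative invocation of the new command (the revset argument and the way the extension is loaded are examples, not part of this change):

    hg --config extensions.perf=contrib/perf.py perf::bundle '::tip'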
@@ -1,3979 +1,4029 @@
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median, and average. If not set, only the best timing is reported
12 worst, median, and average. If not set, only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of seconds to wait before any group of runs (default: 1)
16 number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of runs to perform before starting measurement.
19 number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (Only the first iteration is profiled)
23 (Only the first iteration is profiled)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If the benchmark has been running for <time> seconds, and we have performed
30 If the benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark.
31 <numberofrun> iterations, stop the benchmark.
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
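# Example configuration for the options documented above (illustrative only;
# the values below are assumptions, not defaults taken from this file):
#
#   [perf]
#   all-timing = yes
#   pre-run = 2
#   run-limits = 5.0-50, 30.0-5
#   profile-benchmark = no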
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
138 def identity(a):
138 def identity(a):
139 return a
139 return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
192 def safehasattr(thing, attr):
192 def safehasattr(thing, attr):
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238 # for "historical portability":
238 # for "historical portability":
239 # define parsealiases locally, because cmdutil.parsealiases has been
239 # define parsealiases locally, because cmdutil.parsealiases has been
240 # available since 1.5 (or 6252852b4332)
240 # available since 1.5 (or 6252852b4332)
241 def parsealiases(cmd):
241 def parsealiases(cmd):
242 return cmd.split(b"|")
242 return cmd.split(b"|")
243
243
244
244
245 if safehasattr(registrar, 'command'):
245 if safehasattr(registrar, 'command'):
246 command = registrar.command(cmdtable)
246 command = registrar.command(cmdtable)
247 elif safehasattr(cmdutil, 'command'):
247 elif safehasattr(cmdutil, 'command'):
248 command = cmdutil.command(cmdtable)
248 command = cmdutil.command(cmdtable)
249 if 'norepo' not in getargspec(command).args:
249 if 'norepo' not in getargspec(command).args:
250 # for "historical portability":
250 # for "historical portability":
251 # wrap original cmdutil.command, because "norepo" option has
251 # wrap original cmdutil.command, because "norepo" option has
252 # been available since 3.1 (or 75a96326cecb)
252 # been available since 3.1 (or 75a96326cecb)
253 _command = command
253 _command = command
254
254
255 def command(name, options=(), synopsis=None, norepo=False):
255 def command(name, options=(), synopsis=None, norepo=False):
256 if norepo:
256 if norepo:
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 return _command(name, list(options), synopsis)
258 return _command(name, list(options), synopsis)
259
259
260
260
261 else:
261 else:
262 # for "historical portability":
262 # for "historical portability":
263 # define "@command" annotation locally, because cmdutil.command
263 # define "@command" annotation locally, because cmdutil.command
264 # has been available since 1.9 (or 2daa5179e73f)
264 # has been available since 1.9 (or 2daa5179e73f)
265 def command(name, options=(), synopsis=None, norepo=False):
265 def command(name, options=(), synopsis=None, norepo=False):
266 def decorator(func):
266 def decorator(func):
267 if synopsis:
267 if synopsis:
268 cmdtable[name] = func, list(options), synopsis
268 cmdtable[name] = func, list(options), synopsis
269 else:
269 else:
270 cmdtable[name] = func, list(options)
270 cmdtable[name] = func, list(options)
271 if norepo:
271 if norepo:
272 commands.norepo += b' %s' % b' '.join(parsealiases(name))
272 commands.norepo += b' %s' % b' '.join(parsealiases(name))
273 return func
273 return func
274
274
275 return decorator
275 return decorator
276
276
277
277
278 try:
278 try:
279 import mercurial.registrar
279 import mercurial.registrar
280 import mercurial.configitems
280 import mercurial.configitems
281
281
282 configtable = {}
282 configtable = {}
283 configitem = mercurial.registrar.configitem(configtable)
283 configitem = mercurial.registrar.configitem(configtable)
284 configitem(
284 configitem(
285 b'perf',
285 b'perf',
286 b'presleep',
286 b'presleep',
287 default=mercurial.configitems.dynamicdefault,
287 default=mercurial.configitems.dynamicdefault,
288 experimental=True,
288 experimental=True,
289 )
289 )
290 configitem(
290 configitem(
291 b'perf',
291 b'perf',
292 b'stub',
292 b'stub',
293 default=mercurial.configitems.dynamicdefault,
293 default=mercurial.configitems.dynamicdefault,
294 experimental=True,
294 experimental=True,
295 )
295 )
296 configitem(
296 configitem(
297 b'perf',
297 b'perf',
298 b'parentscount',
298 b'parentscount',
299 default=mercurial.configitems.dynamicdefault,
299 default=mercurial.configitems.dynamicdefault,
300 experimental=True,
300 experimental=True,
301 )
301 )
302 configitem(
302 configitem(
303 b'perf',
303 b'perf',
304 b'all-timing',
304 b'all-timing',
305 default=mercurial.configitems.dynamicdefault,
305 default=mercurial.configitems.dynamicdefault,
306 experimental=True,
306 experimental=True,
307 )
307 )
308 configitem(
308 configitem(
309 b'perf',
309 b'perf',
310 b'pre-run',
310 b'pre-run',
311 default=mercurial.configitems.dynamicdefault,
311 default=mercurial.configitems.dynamicdefault,
312 )
312 )
313 configitem(
313 configitem(
314 b'perf',
314 b'perf',
315 b'profile-benchmark',
315 b'profile-benchmark',
316 default=mercurial.configitems.dynamicdefault,
316 default=mercurial.configitems.dynamicdefault,
317 )
317 )
318 configitem(
318 configitem(
319 b'perf',
319 b'perf',
320 b'run-limits',
320 b'run-limits',
321 default=mercurial.configitems.dynamicdefault,
321 default=mercurial.configitems.dynamicdefault,
322 experimental=True,
322 experimental=True,
323 )
323 )
324 except (ImportError, AttributeError):
324 except (ImportError, AttributeError):
325 pass
325 pass
326 except TypeError:
326 except TypeError:
327 # compatibility fix for a11fd395e83f
327 # compatibility fix for a11fd395e83f
328 # hg version: 5.2
328 # hg version: 5.2
329 configitem(
329 configitem(
330 b'perf',
330 b'perf',
331 b'presleep',
331 b'presleep',
332 default=mercurial.configitems.dynamicdefault,
332 default=mercurial.configitems.dynamicdefault,
333 )
333 )
334 configitem(
334 configitem(
335 b'perf',
335 b'perf',
336 b'stub',
336 b'stub',
337 default=mercurial.configitems.dynamicdefault,
337 default=mercurial.configitems.dynamicdefault,
338 )
338 )
339 configitem(
339 configitem(
340 b'perf',
340 b'perf',
341 b'parentscount',
341 b'parentscount',
342 default=mercurial.configitems.dynamicdefault,
342 default=mercurial.configitems.dynamicdefault,
343 )
343 )
344 configitem(
344 configitem(
345 b'perf',
345 b'perf',
346 b'all-timing',
346 b'all-timing',
347 default=mercurial.configitems.dynamicdefault,
347 default=mercurial.configitems.dynamicdefault,
348 )
348 )
349 configitem(
349 configitem(
350 b'perf',
350 b'perf',
351 b'pre-run',
351 b'pre-run',
352 default=mercurial.configitems.dynamicdefault,
352 default=mercurial.configitems.dynamicdefault,
353 )
353 )
354 configitem(
354 configitem(
355 b'perf',
355 b'perf',
356 b'profile-benchmark',
356 b'profile-benchmark',
357 default=mercurial.configitems.dynamicdefault,
357 default=mercurial.configitems.dynamicdefault,
358 )
358 )
359 configitem(
359 configitem(
360 b'perf',
360 b'perf',
361 b'run-limits',
361 b'run-limits',
362 default=mercurial.configitems.dynamicdefault,
362 default=mercurial.configitems.dynamicdefault,
363 )
363 )
364
364
365
365
366 def getlen(ui):
366 def getlen(ui):
367 if ui.configbool(b"perf", b"stub", False):
367 if ui.configbool(b"perf", b"stub", False):
368 return lambda x: 1
368 return lambda x: 1
369 return len
369 return len
370
370
371
371
372 class noop:
372 class noop:
373 """dummy context manager"""
373 """dummy context manager"""
374
374
375 def __enter__(self):
375 def __enter__(self):
376 pass
376 pass
377
377
378 def __exit__(self, *args):
378 def __exit__(self, *args):
379 pass
379 pass
380
380
381
381
382 NOOPCTX = noop()
382 NOOPCTX = noop()
383
383
384
384
385 def gettimer(ui, opts=None):
385 def gettimer(ui, opts=None):
386 """return a timer function and formatter: (timer, formatter)
386 """return a timer function and formatter: (timer, formatter)
387
387
388 This function exists to gather the creation of formatter in a single
388 This function exists to gather the creation of formatter in a single
389 place instead of duplicating it in all performance commands."""
389 place instead of duplicating it in all performance commands."""
390
390
391 # enforce an idle period before execution to counteract power management
391 # enforce an idle period before execution to counteract power management
392 # experimental config: perf.presleep
392 # experimental config: perf.presleep
393 time.sleep(getint(ui, b"perf", b"presleep", 1))
393 time.sleep(getint(ui, b"perf", b"presleep", 1))
394
394
395 if opts is None:
395 if opts is None:
396 opts = {}
396 opts = {}
397 # redirect all to stderr unless buffer api is in use
397 # redirect all to stderr unless buffer api is in use
398 if not ui._buffers:
398 if not ui._buffers:
399 ui = ui.copy()
399 ui = ui.copy()
400 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
400 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
401 if uifout:
401 if uifout:
402 # for "historical portability":
402 # for "historical portability":
403 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
403 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
404 uifout.set(ui.ferr)
404 uifout.set(ui.ferr)
405
405
406 # get a formatter
406 # get a formatter
407 uiformatter = getattr(ui, 'formatter', None)
407 uiformatter = getattr(ui, 'formatter', None)
408 if uiformatter:
408 if uiformatter:
409 fm = uiformatter(b'perf', opts)
409 fm = uiformatter(b'perf', opts)
410 else:
410 else:
411 # for "historical portability":
411 # for "historical portability":
412 # define formatter locally, because ui.formatter has been
412 # define formatter locally, because ui.formatter has been
413 # available since 2.2 (or ae5f92e154d3)
413 # available since 2.2 (or ae5f92e154d3)
414 from mercurial import node
414 from mercurial import node
415
415
416 class defaultformatter:
416 class defaultformatter:
417 """Minimized composition of baseformatter and plainformatter"""
417 """Minimized composition of baseformatter and plainformatter"""
418
418
419 def __init__(self, ui, topic, opts):
419 def __init__(self, ui, topic, opts):
420 self._ui = ui
420 self._ui = ui
421 if ui.debugflag:
421 if ui.debugflag:
422 self.hexfunc = node.hex
422 self.hexfunc = node.hex
423 else:
423 else:
424 self.hexfunc = node.short
424 self.hexfunc = node.short
425
425
426 def __nonzero__(self):
426 def __nonzero__(self):
427 return False
427 return False
428
428
429 __bool__ = __nonzero__
429 __bool__ = __nonzero__
430
430
431 def startitem(self):
431 def startitem(self):
432 pass
432 pass
433
433
434 def data(self, **data):
434 def data(self, **data):
435 pass
435 pass
436
436
437 def write(self, fields, deftext, *fielddata, **opts):
437 def write(self, fields, deftext, *fielddata, **opts):
438 self._ui.write(deftext % fielddata, **opts)
438 self._ui.write(deftext % fielddata, **opts)
439
439
440 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
440 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
441 if cond:
441 if cond:
442 self._ui.write(deftext % fielddata, **opts)
442 self._ui.write(deftext % fielddata, **opts)
443
443
444 def plain(self, text, **opts):
444 def plain(self, text, **opts):
445 self._ui.write(text, **opts)
445 self._ui.write(text, **opts)
446
446
447 def end(self):
447 def end(self):
448 pass
448 pass
449
449
450 fm = defaultformatter(ui, b'perf', opts)
450 fm = defaultformatter(ui, b'perf', opts)
451
451
452 # stub function, runs code only once instead of in a loop
452 # stub function, runs code only once instead of in a loop
453 # experimental config: perf.stub
453 # experimental config: perf.stub
454 if ui.configbool(b"perf", b"stub", False):
454 if ui.configbool(b"perf", b"stub", False):
455 return functools.partial(stub_timer, fm), fm
455 return functools.partial(stub_timer, fm), fm
456
456
457 # experimental config: perf.all-timing
457 # experimental config: perf.all-timing
458 displayall = ui.configbool(b"perf", b"all-timing", False)
458 displayall = ui.configbool(b"perf", b"all-timing", False)
459
459
460 # experimental config: perf.run-limits
460 # experimental config: perf.run-limits
461 limitspec = ui.configlist(b"perf", b"run-limits", [])
461 limitspec = ui.configlist(b"perf", b"run-limits", [])
462 limits = []
462 limits = []
463 for item in limitspec:
463 for item in limitspec:
464 parts = item.split(b'-', 1)
464 parts = item.split(b'-', 1)
465 if len(parts) < 2:
465 if len(parts) < 2:
466 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
466 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
467 continue
467 continue
468 try:
468 try:
469 time_limit = float(_sysstr(parts[0]))
469 time_limit = float(_sysstr(parts[0]))
470 except ValueError as e:
470 except ValueError as e:
471 ui.warn(
471 ui.warn(
472 (
472 (
473 b'malformatted run limit entry, %s: %s\n'
473 b'malformatted run limit entry, %s: %s\n'
474 % (_bytestr(e), item)
474 % (_bytestr(e), item)
475 )
475 )
476 )
476 )
477 continue
477 continue
478 try:
478 try:
479 run_limit = int(_sysstr(parts[1]))
479 run_limit = int(_sysstr(parts[1]))
480 except ValueError as e:
480 except ValueError as e:
481 ui.warn(
481 ui.warn(
482 (
482 (
483 b'malformatted run limit entry, %s: %s\n'
483 b'malformatted run limit entry, %s: %s\n'
484 % (_bytestr(e), item)
484 % (_bytestr(e), item)
485 )
485 )
486 )
486 )
487 continue
487 continue
488 limits.append((time_limit, run_limit))
488 limits.append((time_limit, run_limit))
489 if not limits:
489 if not limits:
490 limits = DEFAULTLIMITS
490 limits = DEFAULTLIMITS
491
491
492 profiler = None
492 profiler = None
493 if profiling is not None:
493 if profiling is not None:
494 if ui.configbool(b"perf", b"profile-benchmark", False):
494 if ui.configbool(b"perf", b"profile-benchmark", False):
495 profiler = profiling.profile(ui)
495 profiler = profiling.profile(ui)
496
496
497 prerun = getint(ui, b"perf", b"pre-run", 0)
497 prerun = getint(ui, b"perf", b"pre-run", 0)
498 t = functools.partial(
498 t = functools.partial(
499 _timer,
499 _timer,
500 fm,
500 fm,
501 displayall=displayall,
501 displayall=displayall,
502 limits=limits,
502 limits=limits,
503 prerun=prerun,
503 prerun=prerun,
504 profiler=profiler,
504 profiler=profiler,
505 )
505 )
506 return t, fm
506 return t, fm
507
507
508
508
509 def stub_timer(fm, func, setup=None, title=None):
509 def stub_timer(fm, func, setup=None, title=None):
510 if setup is not None:
510 if setup is not None:
511 setup()
511 setup()
512 func()
512 func()
513
513
514
514
515 @contextlib.contextmanager
515 @contextlib.contextmanager
516 def timeone():
516 def timeone():
517 r = []
517 r = []
518 ostart = os.times()
518 ostart = os.times()
519 cstart = util.timer()
519 cstart = util.timer()
520 yield r
520 yield r
521 cstop = util.timer()
521 cstop = util.timer()
522 ostop = os.times()
522 ostop = os.times()
523 a, b = ostart, ostop
523 a, b = ostart, ostop
524 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
524 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
525
525
526
526
527 # list of stop conditions (elapsed time, minimal run count)
527 # list of stop conditions (elapsed time, minimal run count)
528 DEFAULTLIMITS = (
528 DEFAULTLIMITS = (
529 (3.0, 100),
529 (3.0, 100),
530 (10.0, 3),
530 (10.0, 3),
531 )
531 )
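# With the defaults above, a benchmark stops once 3.0 seconds have elapsed and
# at least 100 iterations were performed, or once 10.0 seconds have elapsed and
# at least 3 iterations were performed (see the limit check in _timer below).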
532
532
533
533
534 def _timer(
534 def _timer(
535 fm,
535 fm,
536 func,
536 func,
537 setup=None,
537 setup=None,
538 title=None,
538 title=None,
539 displayall=False,
539 displayall=False,
540 limits=DEFAULTLIMITS,
540 limits=DEFAULTLIMITS,
541 prerun=0,
541 prerun=0,
542 profiler=None,
542 profiler=None,
543 ):
543 ):
544 gc.collect()
544 gc.collect()
545 results = []
545 results = []
546 begin = util.timer()
546 begin = util.timer()
547 count = 0
547 count = 0
548 if profiler is None:
548 if profiler is None:
549 profiler = NOOPCTX
549 profiler = NOOPCTX
550 for i in range(prerun):
550 for i in range(prerun):
551 if setup is not None:
551 if setup is not None:
552 setup()
552 setup()
553 func()
553 func()
554 keepgoing = True
554 keepgoing = True
555 while keepgoing:
555 while keepgoing:
556 if setup is not None:
556 if setup is not None:
557 setup()
557 setup()
558 with profiler:
558 with profiler:
559 with timeone() as item:
559 with timeone() as item:
560 r = func()
560 r = func()
561 profiler = NOOPCTX
561 profiler = NOOPCTX
562 count += 1
562 count += 1
563 results.append(item[0])
563 results.append(item[0])
564 cstop = util.timer()
564 cstop = util.timer()
565 # Look for a stop condition.
565 # Look for a stop condition.
566 elapsed = cstop - begin
566 elapsed = cstop - begin
567 for t, mincount in limits:
567 for t, mincount in limits:
568 if elapsed >= t and count >= mincount:
568 if elapsed >= t and count >= mincount:
569 keepgoing = False
569 keepgoing = False
570 break
570 break
571
571
572 formatone(fm, results, title=title, result=r, displayall=displayall)
572 formatone(fm, results, title=title, result=r, displayall=displayall)
573
573
574
574
575 def formatone(fm, timings, title=None, result=None, displayall=False):
575 def formatone(fm, timings, title=None, result=None, displayall=False):
576
576
577 count = len(timings)
577 count = len(timings)
578
578
579 fm.startitem()
579 fm.startitem()
580
580
581 if title:
581 if title:
582 fm.write(b'title', b'! %s\n', title)
582 fm.write(b'title', b'! %s\n', title)
583 if result:
583 if result:
584 fm.write(b'result', b'! result: %s\n', result)
584 fm.write(b'result', b'! result: %s\n', result)
585
585
586 def display(role, entry):
586 def display(role, entry):
587 prefix = b''
587 prefix = b''
588 if role != b'best':
588 if role != b'best':
589 prefix = b'%s.' % role
589 prefix = b'%s.' % role
590 fm.plain(b'!')
590 fm.plain(b'!')
591 fm.write(prefix + b'wall', b' wall %f', entry[0])
591 fm.write(prefix + b'wall', b' wall %f', entry[0])
592 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
592 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
593 fm.write(prefix + b'user', b' user %f', entry[1])
593 fm.write(prefix + b'user', b' user %f', entry[1])
594 fm.write(prefix + b'sys', b' sys %f', entry[2])
594 fm.write(prefix + b'sys', b' sys %f', entry[2])
595 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
595 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
596 fm.plain(b'\n')
596 fm.plain(b'\n')
597
597
598 timings.sort()
598 timings.sort()
599 min_val = timings[0]
599 min_val = timings[0]
600 display(b'best', min_val)
600 display(b'best', min_val)
601 if displayall:
601 if displayall:
602 max_val = timings[-1]
602 max_val = timings[-1]
603 display(b'max', max_val)
603 display(b'max', max_val)
604 avg = tuple([sum(x) / count for x in zip(*timings)])
604 avg = tuple([sum(x) / count for x in zip(*timings)])
605 display(b'avg', avg)
605 display(b'avg', avg)
606 median = timings[len(timings) // 2]
606 median = timings[len(timings) // 2]
607 display(b'median', median)
607 display(b'median', median)
608
608
609
609
610 # utilities for historical portability
610 # utilities for historical portability
611
611
612
612
613 def getint(ui, section, name, default):
613 def getint(ui, section, name, default):
614 # for "historical portability":
614 # for "historical portability":
615 # ui.configint has been available since 1.9 (or fa2b596db182)
615 # ui.configint has been available since 1.9 (or fa2b596db182)
616 v = ui.config(section, name, None)
616 v = ui.config(section, name, None)
617 if v is None:
617 if v is None:
618 return default
618 return default
619 try:
619 try:
620 return int(v)
620 return int(v)
621 except ValueError:
621 except ValueError:
622 raise error.ConfigError(
622 raise error.ConfigError(
623 b"%s.%s is not an integer ('%s')" % (section, name, v)
623 b"%s.%s is not an integer ('%s')" % (section, name, v)
624 )
624 )
625
625
626
626
627 def safeattrsetter(obj, name, ignoremissing=False):
627 def safeattrsetter(obj, name, ignoremissing=False):
628 """Ensure that 'obj' has 'name' attribute before subsequent setattr
628 """Ensure that 'obj' has 'name' attribute before subsequent setattr
629
629
630 This function aborts if 'obj' doesn't have the 'name' attribute
630 This function aborts if 'obj' doesn't have the 'name' attribute
631 at runtime. This avoids overlooking future removal of an attribute,
631 at runtime. This avoids overlooking future removal of an attribute,
632 which would break an assumption of the performance measurement.
632 which would break an assumption of the performance measurement.
633
633
634 This function returns the object to (1) assign a new value, and
634 This function returns the object to (1) assign a new value, and
635 (2) restore an original value to the attribute.
635 (2) restore an original value to the attribute.
636
636
637 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
637 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
638 an abort, and this function returns None. This is useful to
638 an abort, and this function returns None. This is useful to
639 examine an attribute which isn't guaranteed to exist in all Mercurial
639 examine an attribute which isn't guaranteed to exist in all Mercurial
640 versions.
640 versions.
641 """
641 """
642 if not util.safehasattr(obj, name):
642 if not util.safehasattr(obj, name):
643 if ignoremissing:
643 if ignoremissing:
644 return None
644 return None
645 raise error.Abort(
645 raise error.Abort(
646 (
646 (
647 b"missing attribute %s of %s might break assumption"
647 b"missing attribute %s of %s might break assumption"
648 b" of performance measurement"
648 b" of performance measurement"
649 )
649 )
650 % (name, obj)
650 % (name, obj)
651 )
651 )
652
652
653 origvalue = getattr(obj, _sysstr(name))
653 origvalue = getattr(obj, _sysstr(name))
654
654
655 class attrutil:
655 class attrutil:
656 def set(self, newvalue):
656 def set(self, newvalue):
657 setattr(obj, _sysstr(name), newvalue)
657 setattr(obj, _sysstr(name), newvalue)
658
658
659 def restore(self):
659 def restore(self):
660 setattr(obj, _sysstr(name), origvalue)
660 setattr(obj, _sysstr(name), origvalue)
661
661
662 return attrutil()
662 return attrutil()
663
663
664
664
665 # utilities to examine each internal API changes
665 # utilities to examine each internal API changes
666
666
667
667
668 def getbranchmapsubsettable():
668 def getbranchmapsubsettable():
669 # for "historical portability":
669 # for "historical portability":
670 # subsettable is defined in:
670 # subsettable is defined in:
671 # - branchmap since 2.9 (or 175c6fd8cacc)
671 # - branchmap since 2.9 (or 175c6fd8cacc)
672 # - repoview since 2.5 (or 59a9f18d4587)
672 # - repoview since 2.5 (or 59a9f18d4587)
673 # - repoviewutil since 5.0
673 # - repoviewutil since 5.0
674 for mod in (branchmap, repoview, repoviewutil):
674 for mod in (branchmap, repoview, repoviewutil):
675 subsettable = getattr(mod, 'subsettable', None)
675 subsettable = getattr(mod, 'subsettable', None)
676 if subsettable:
676 if subsettable:
677 return subsettable
677 return subsettable
678
678
679 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
679 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
680 # branchmap and repoview modules exist, but subsettable attribute
680 # branchmap and repoview modules exist, but subsettable attribute
681 # doesn't)
681 # doesn't)
682 raise error.Abort(
682 raise error.Abort(
683 b"perfbranchmap not available with this Mercurial",
683 b"perfbranchmap not available with this Mercurial",
684 hint=b"use 2.5 or later",
684 hint=b"use 2.5 or later",
685 )
685 )
686
686
687
687
688 def getsvfs(repo):
688 def getsvfs(repo):
689 """Return appropriate object to access files under .hg/store"""
689 """Return appropriate object to access files under .hg/store"""
690 # for "historical portability":
690 # for "historical portability":
691 # repo.svfs has been available since 2.3 (or 7034365089bf)
691 # repo.svfs has been available since 2.3 (or 7034365089bf)
692 svfs = getattr(repo, 'svfs', None)
692 svfs = getattr(repo, 'svfs', None)
693 if svfs:
693 if svfs:
694 return svfs
694 return svfs
695 else:
695 else:
696 return getattr(repo, 'sopener')
696 return getattr(repo, 'sopener')
697
697
698
698
699 def getvfs(repo):
699 def getvfs(repo):
700 """Return appropriate object to access files under .hg"""
700 """Return appropriate object to access files under .hg"""
701 # for "historical portability":
701 # for "historical portability":
702 # repo.vfs has been available since 2.3 (or 7034365089bf)
702 # repo.vfs has been available since 2.3 (or 7034365089bf)
703 vfs = getattr(repo, 'vfs', None)
703 vfs = getattr(repo, 'vfs', None)
704 if vfs:
704 if vfs:
705 return vfs
705 return vfs
706 else:
706 else:
707 return getattr(repo, 'opener')
707 return getattr(repo, 'opener')
708
708
709
709
710 def repocleartagscachefunc(repo):
710 def repocleartagscachefunc(repo):
711 """Return the function to clear tags cache according to repo internal API"""
711 """Return the function to clear tags cache according to repo internal API"""
712 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
712 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
713 # in this case, setattr(repo, '_tagscache', None) or so isn't
713 # in this case, setattr(repo, '_tagscache', None) or so isn't
714 # the correct way to clear the tags cache, because existing code paths
714 # the correct way to clear the tags cache, because existing code paths
715 # expect _tagscache to be a structured object.
715 # expect _tagscache to be a structured object.
716 def clearcache():
716 def clearcache():
717 # _tagscache has been filteredpropertycache since 2.5 (or
717 # _tagscache has been filteredpropertycache since 2.5 (or
718 # 98c867ac1330), and delattr() can't work in such case
718 # 98c867ac1330), and delattr() can't work in such case
719 if '_tagscache' in vars(repo):
719 if '_tagscache' in vars(repo):
720 del repo.__dict__['_tagscache']
720 del repo.__dict__['_tagscache']
721
721
722 return clearcache
722 return clearcache
723
723
724 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
724 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
725 if repotags: # since 1.4 (or 5614a628d173)
725 if repotags: # since 1.4 (or 5614a628d173)
726 return lambda: repotags.set(None)
726 return lambda: repotags.set(None)
727
727
728 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
728 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
729 if repotagscache: # since 0.6 (or d7df759d0e97)
729 if repotagscache: # since 0.6 (or d7df759d0e97)
730 return lambda: repotagscache.set(None)
730 return lambda: repotagscache.set(None)
731
731
732 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
732 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
733 # this point, but it isn't so problematic, because:
733 # this point, but it isn't so problematic, because:
734 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
734 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
735 # in perftags() causes failure soon
735 # in perftags() causes failure soon
736 # - perf.py itself has been available since 1.1 (or eb240755386d)
736 # - perf.py itself has been available since 1.1 (or eb240755386d)
737 raise error.Abort(b"tags API of this hg command is unknown")
737 raise error.Abort(b"tags API of this hg command is unknown")
738
738
739
739
740 # utilities to clear cache
740 # utilities to clear cache
741
741
742
742
743 def clearfilecache(obj, attrname):
743 def clearfilecache(obj, attrname):
744 unfiltered = getattr(obj, 'unfiltered', None)
744 unfiltered = getattr(obj, 'unfiltered', None)
745 if unfiltered is not None:
745 if unfiltered is not None:
746 obj = obj.unfiltered()
746 obj = obj.unfiltered()
747 if attrname in vars(obj):
747 if attrname in vars(obj):
748 delattr(obj, attrname)
748 delattr(obj, attrname)
749 obj._filecache.pop(attrname, None)
749 obj._filecache.pop(attrname, None)
750
750
751
751
752 def clearchangelog(repo):
752 def clearchangelog(repo):
753 if repo is not repo.unfiltered():
753 if repo is not repo.unfiltered():
754 object.__setattr__(repo, '_clcachekey', None)
754 object.__setattr__(repo, '_clcachekey', None)
755 object.__setattr__(repo, '_clcache', None)
755 object.__setattr__(repo, '_clcache', None)
756 clearfilecache(repo.unfiltered(), 'changelog')
756 clearfilecache(repo.unfiltered(), 'changelog')
757
757
758
758
759 # perf commands
759 # perf commands
760
760
761
761
762 @command(b'perf::walk|perfwalk', formatteropts)
762 @command(b'perf::walk|perfwalk', formatteropts)
763 def perfwalk(ui, repo, *pats, **opts):
763 def perfwalk(ui, repo, *pats, **opts):
764 opts = _byteskwargs(opts)
764 opts = _byteskwargs(opts)
765 timer, fm = gettimer(ui, opts)
765 timer, fm = gettimer(ui, opts)
766 m = scmutil.match(repo[None], pats, {})
766 m = scmutil.match(repo[None], pats, {})
767 timer(
767 timer(
768 lambda: len(
768 lambda: len(
769 list(
769 list(
770 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
770 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
771 )
771 )
772 )
772 )
773 )
773 )
774 fm.end()
774 fm.end()
775
775
776
776
777 @command(b'perf::annotate|perfannotate', formatteropts)
777 @command(b'perf::annotate|perfannotate', formatteropts)
778 def perfannotate(ui, repo, f, **opts):
778 def perfannotate(ui, repo, f, **opts):
779 opts = _byteskwargs(opts)
779 opts = _byteskwargs(opts)
780 timer, fm = gettimer(ui, opts)
780 timer, fm = gettimer(ui, opts)
781 fc = repo[b'.'][f]
781 fc = repo[b'.'][f]
782 timer(lambda: len(fc.annotate(True)))
782 timer(lambda: len(fc.annotate(True)))
783 fm.end()
783 fm.end()
784
784
785
785
786 @command(
786 @command(
787 b'perf::status|perfstatus',
787 b'perf::status|perfstatus',
788 [
788 [
789 (b'u', b'unknown', False, b'ask status to look for unknown files'),
789 (b'u', b'unknown', False, b'ask status to look for unknown files'),
790 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
790 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
791 ]
791 ]
792 + formatteropts,
792 + formatteropts,
793 )
793 )
794 def perfstatus(ui, repo, **opts):
794 def perfstatus(ui, repo, **opts):
795 """benchmark the performance of a single status call
795 """benchmark the performance of a single status call
796
796
797 The repository data are preserved between each call.
797 The repository data are preserved between each call.
798
798
799 By default, only the status of tracked files is requested. If
799 By default, only the status of tracked files is requested. If
800 `--unknown` is passed, the "unknown" files are also requested.
800 `--unknown` is passed, the "unknown" files are also requested.
801 """
801 """
802 opts = _byteskwargs(opts)
802 opts = _byteskwargs(opts)
803 # m = match.always(repo.root, repo.getcwd())
803 # m = match.always(repo.root, repo.getcwd())
804 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
804 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
805 # False))))
805 # False))))
806 timer, fm = gettimer(ui, opts)
806 timer, fm = gettimer(ui, opts)
807 if opts[b'dirstate']:
807 if opts[b'dirstate']:
808 dirstate = repo.dirstate
808 dirstate = repo.dirstate
809 m = scmutil.matchall(repo)
809 m = scmutil.matchall(repo)
810 unknown = opts[b'unknown']
810 unknown = opts[b'unknown']
811
811
812 def status_dirstate():
812 def status_dirstate():
813 s = dirstate.status(
813 s = dirstate.status(
814 m, subrepos=[], ignored=False, clean=False, unknown=unknown
814 m, subrepos=[], ignored=False, clean=False, unknown=unknown
815 )
815 )
816 sum(map(bool, s))
816 sum(map(bool, s))
817
817
818 timer(status_dirstate)
818 timer(status_dirstate)
819 else:
819 else:
820 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
820 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
821 fm.end()
821 fm.end()
822
822
823
823
824 @command(b'perf::addremove|perfaddremove', formatteropts)
824 @command(b'perf::addremove|perfaddremove', formatteropts)
825 def perfaddremove(ui, repo, **opts):
825 def perfaddremove(ui, repo, **opts):
826 opts = _byteskwargs(opts)
826 opts = _byteskwargs(opts)
827 timer, fm = gettimer(ui, opts)
827 timer, fm = gettimer(ui, opts)
828 try:
828 try:
829 oldquiet = repo.ui.quiet
829 oldquiet = repo.ui.quiet
830 repo.ui.quiet = True
830 repo.ui.quiet = True
831 matcher = scmutil.match(repo[None])
831 matcher = scmutil.match(repo[None])
832 opts[b'dry_run'] = True
832 opts[b'dry_run'] = True
833 if 'uipathfn' in getargspec(scmutil.addremove).args:
833 if 'uipathfn' in getargspec(scmutil.addremove).args:
834 uipathfn = scmutil.getuipathfn(repo)
834 uipathfn = scmutil.getuipathfn(repo)
835 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
835 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
836 else:
836 else:
837 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
837 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
838 finally:
838 finally:
839 repo.ui.quiet = oldquiet
839 repo.ui.quiet = oldquiet
840 fm.end()
840 fm.end()
841
841
842
842
843 def clearcaches(cl):
843 def clearcaches(cl):
844 # behave somewhat consistently across internal API changes
844 # behave somewhat consistently across internal API changes
845 if util.safehasattr(cl, b'clearcaches'):
845 if util.safehasattr(cl, b'clearcaches'):
846 cl.clearcaches()
846 cl.clearcaches()
847 elif util.safehasattr(cl, b'_nodecache'):
847 elif util.safehasattr(cl, b'_nodecache'):
848 # <= hg-5.2
848 # <= hg-5.2
849 from mercurial.node import nullid, nullrev
849 from mercurial.node import nullid, nullrev
850
850
851 cl._nodecache = {nullid: nullrev}
851 cl._nodecache = {nullid: nullrev}
852 cl._nodepos = None
852 cl._nodepos = None
853
853
854
854
855 @command(b'perf::heads|perfheads', formatteropts)
855 @command(b'perf::heads|perfheads', formatteropts)
856 def perfheads(ui, repo, **opts):
856 def perfheads(ui, repo, **opts):
857 """benchmark the computation of a changelog heads"""
857 """benchmark the computation of a changelog heads"""
858 opts = _byteskwargs(opts)
858 opts = _byteskwargs(opts)
859 timer, fm = gettimer(ui, opts)
859 timer, fm = gettimer(ui, opts)
860 cl = repo.changelog
860 cl = repo.changelog
861
861
862 def s():
862 def s():
863 clearcaches(cl)
863 clearcaches(cl)
864
864
865 def d():
865 def d():
866 len(cl.headrevs())
866 len(cl.headrevs())
867
867
868 timer(d, setup=s)
868 timer(d, setup=s)
869 fm.end()
869 fm.end()
870
870
871
871
872 @command(
872 @command(
873 b'perf::tags|perftags',
873 b'perf::tags|perftags',
874 formatteropts
874 formatteropts
875 + [
875 + [
876 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
876 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
877 ],
877 ],
878 )
878 )
879 def perftags(ui, repo, **opts):
879 def perftags(ui, repo, **opts):
880 opts = _byteskwargs(opts)
880 opts = _byteskwargs(opts)
881 timer, fm = gettimer(ui, opts)
881 timer, fm = gettimer(ui, opts)
882 repocleartagscache = repocleartagscachefunc(repo)
882 repocleartagscache = repocleartagscachefunc(repo)
883 clearrevlogs = opts[b'clear_revlogs']
883 clearrevlogs = opts[b'clear_revlogs']
884
884
885 def s():
885 def s():
886 if clearrevlogs:
886 if clearrevlogs:
887 clearchangelog(repo)
887 clearchangelog(repo)
888 clearfilecache(repo.unfiltered(), 'manifest')
888 clearfilecache(repo.unfiltered(), 'manifest')
889 repocleartagscache()
889 repocleartagscache()
890
890
891 def t():
891 def t():
892 return len(repo.tags())
892 return len(repo.tags())
893
893
894 timer(t, setup=s)
894 timer(t, setup=s)
895 fm.end()
895 fm.end()
896
896
897
897
898 @command(b'perf::ancestors|perfancestors', formatteropts)
898 @command(b'perf::ancestors|perfancestors', formatteropts)
899 def perfancestors(ui, repo, **opts):
899 def perfancestors(ui, repo, **opts):
900 opts = _byteskwargs(opts)
900 opts = _byteskwargs(opts)
901 timer, fm = gettimer(ui, opts)
901 timer, fm = gettimer(ui, opts)
902 heads = repo.changelog.headrevs()
902 heads = repo.changelog.headrevs()
903
903
904 def d():
904 def d():
905 for a in repo.changelog.ancestors(heads):
905 for a in repo.changelog.ancestors(heads):
906 pass
906 pass
907
907
908 timer(d)
908 timer(d)
909 fm.end()
909 fm.end()
910
910
911
911
912 @command(b'perf::ancestorset|perfancestorset', formatteropts)
912 @command(b'perf::ancestorset|perfancestorset', formatteropts)
913 def perfancestorset(ui, repo, revset, **opts):
913 def perfancestorset(ui, repo, revset, **opts):
914 opts = _byteskwargs(opts)
914 opts = _byteskwargs(opts)
915 timer, fm = gettimer(ui, opts)
915 timer, fm = gettimer(ui, opts)
916 revs = repo.revs(revset)
916 revs = repo.revs(revset)
917 heads = repo.changelog.headrevs()
917 heads = repo.changelog.headrevs()
918
918
919 def d():
919 def d():
920 s = repo.changelog.ancestors(heads)
920 s = repo.changelog.ancestors(heads)
921 for rev in revs:
921 for rev in revs:
922 rev in s
922 rev in s
923
923
924 timer(d)
924 timer(d)
925 fm.end()
925 fm.end()
926
926
927
927
928 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
928 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
929 def perfdiscovery(ui, repo, path, **opts):
929 def perfdiscovery(ui, repo, path, **opts):
930 """benchmark discovery between local repo and the peer at given path"""
930 """benchmark discovery between local repo and the peer at given path"""
931 repos = [repo, None]
931 repos = [repo, None]
932 timer, fm = gettimer(ui, opts)
932 timer, fm = gettimer(ui, opts)
933
933
934 try:
934 try:
935 from mercurial.utils.urlutil import get_unique_pull_path
935 from mercurial.utils.urlutil import get_unique_pull_path
936
936
937 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
937 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
938 except ImportError:
938 except ImportError:
939 path = ui.expandpath(path)
939 path = ui.expandpath(path)
940
940
941 def s():
941 def s():
942 repos[1] = hg.peer(ui, opts, path)
942 repos[1] = hg.peer(ui, opts, path)
943
943
944 def d():
944 def d():
945 setdiscovery.findcommonheads(ui, *repos)
945 setdiscovery.findcommonheads(ui, *repos)
946
946
947 timer(d, setup=s)
947 timer(d, setup=s)
948 fm.end()
948 fm.end()
949
949
950
950
951 @command(
951 @command(
952 b'perf::bookmarks|perfbookmarks',
952 b'perf::bookmarks|perfbookmarks',
953 formatteropts
953 formatteropts
954 + [
954 + [
955 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
955 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
956 ],
956 ],
957 )
957 )
958 def perfbookmarks(ui, repo, **opts):
958 def perfbookmarks(ui, repo, **opts):
959 """benchmark parsing bookmarks from disk to memory"""
959 """benchmark parsing bookmarks from disk to memory"""
960 opts = _byteskwargs(opts)
960 opts = _byteskwargs(opts)
961 timer, fm = gettimer(ui, opts)
961 timer, fm = gettimer(ui, opts)
962
962
963 clearrevlogs = opts[b'clear_revlogs']
963 clearrevlogs = opts[b'clear_revlogs']
964
964
965 def s():
965 def s():
966 if clearrevlogs:
966 if clearrevlogs:
967 clearchangelog(repo)
967 clearchangelog(repo)
968 clearfilecache(repo, b'_bookmarks')
968 clearfilecache(repo, b'_bookmarks')
969
969
970 def d():
970 def d():
971 repo._bookmarks
971 repo._bookmarks
972
972
973 timer(d, setup=s)
973 timer(d, setup=s)
974 fm.end()
974 fm.end()
975
975
976
976
977 @command(b'perf::bundle', formatteropts, b'REVS')
978 def perfbundle(ui, repo, *revs, **opts):
979 """benchmark the creation of a bundle from a repository
980
981 For now, this creates a `none-v1` bundle.
982 """
983 from mercurial import bundlecaches
984 from mercurial import discovery
985 from mercurial import bundle2
986
987 opts = _byteskwargs(opts)
988 timer, fm = gettimer(ui, opts)
989
990 cl = repo.changelog
991 revs = scmutil.revrange(repo, revs)
992 if not revs:
993 raise error.Abort(b"no revision specified")
994 # make it a consistent set (ie: without topological gaps)
995 old_len = len(revs)
996 revs = list(repo.revs(b"%ld::%ld", revs, revs))
997 if old_len != len(revs):
998 new_count = len(revs) - old_len
999 msg = b"added %d new revisions to make it a consistent set\n"
1000 ui.write_err(msg % new_count)
1001
1002 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1003 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1004 outgoing = discovery.outgoing(repo, bases, targets)
1005
1006 bundlespec = bundlecaches.parsebundlespec(
1007 repo, b"none", strict=False
1008 )
1009
1010 bversion = b'HG10' + bundlespec.wirecompression
1011
1012 def do_bundle():
1013 bundle2.writenewbundle(
1014 ui,
1015 repo,
1016 b'perf::bundle',
1017 os.devnull,
1018 bversion,
1019 outgoing,
1020 {},
1021 )
1022
1023 timer(do_bundle)
1024 fm.end()
1025
1026
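# Note on the command above: the outgoing set is computed once, outside the
# timed section; only bundle2.writenewbundle() is timed, and the result is
# written to os.devnull, so no bundle data is kept on disk.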
977 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1027 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
978 def perfbundleread(ui, repo, bundlepath, **opts):
1028 def perfbundleread(ui, repo, bundlepath, **opts):
979 """Benchmark reading of bundle files.
1029 """Benchmark reading of bundle files.
980
1030
981 This command is meant to isolate the I/O part of bundle reading as
1031 This command is meant to isolate the I/O part of bundle reading as
982 much as possible.
1032 much as possible.
983 """
1033 """
984 from mercurial import (
1034 from mercurial import (
985 bundle2,
1035 bundle2,
986 exchange,
1036 exchange,
987 streamclone,
1037 streamclone,
988 )
1038 )
989
1039
990 opts = _byteskwargs(opts)
1040 opts = _byteskwargs(opts)
991
1041
992 def makebench(fn):
1042 def makebench(fn):
993 def run():
1043 def run():
994 with open(bundlepath, b'rb') as fh:
1044 with open(bundlepath, b'rb') as fh:
995 bundle = exchange.readbundle(ui, fh, bundlepath)
1045 bundle = exchange.readbundle(ui, fh, bundlepath)
996 fn(bundle)
1046 fn(bundle)
997
1047
998 return run
1048 return run
999
1049
1000 def makereadnbytes(size):
1050 def makereadnbytes(size):
1001 def run():
1051 def run():
1002 with open(bundlepath, b'rb') as fh:
1052 with open(bundlepath, b'rb') as fh:
1003 bundle = exchange.readbundle(ui, fh, bundlepath)
1053 bundle = exchange.readbundle(ui, fh, bundlepath)
1004 while bundle.read(size):
1054 while bundle.read(size):
1005 pass
1055 pass
1006
1056
1007 return run
1057 return run
1008
1058
1009 def makestdioread(size):
1059 def makestdioread(size):
1010 def run():
1060 def run():
1011 with open(bundlepath, b'rb') as fh:
1061 with open(bundlepath, b'rb') as fh:
1012 while fh.read(size):
1062 while fh.read(size):
1013 pass
1063 pass
1014
1064
1015 return run
1065 return run
1016
1066
1017 # bundle1
1067 # bundle1
1018
1068
1019 def deltaiter(bundle):
1069 def deltaiter(bundle):
1020 for delta in bundle.deltaiter():
1070 for delta in bundle.deltaiter():
1021 pass
1071 pass
1022
1072
1023 def iterchunks(bundle):
1073 def iterchunks(bundle):
1024 for chunk in bundle.getchunks():
1074 for chunk in bundle.getchunks():
1025 pass
1075 pass
1026
1076
1027 # bundle2
1077 # bundle2
1028
1078
1029 def forwardchunks(bundle):
1079 def forwardchunks(bundle):
1030 for chunk in bundle._forwardchunks():
1080 for chunk in bundle._forwardchunks():
1031 pass
1081 pass
1032
1082
1033 def iterparts(bundle):
1083 def iterparts(bundle):
1034 for part in bundle.iterparts():
1084 for part in bundle.iterparts():
1035 pass
1085 pass
1036
1086
1037 def iterpartsseekable(bundle):
1087 def iterpartsseekable(bundle):
1038 for part in bundle.iterparts(seekable=True):
1088 for part in bundle.iterparts(seekable=True):
1039 pass
1089 pass
1040
1090
1041 def seek(bundle):
1091 def seek(bundle):
1042 for part in bundle.iterparts(seekable=True):
1092 for part in bundle.iterparts(seekable=True):
1043 part.seek(0, os.SEEK_END)
1093 part.seek(0, os.SEEK_END)
1044
1094
1045 def makepartreadnbytes(size):
1095 def makepartreadnbytes(size):
1046 def run():
1096 def run():
1047 with open(bundlepath, b'rb') as fh:
1097 with open(bundlepath, b'rb') as fh:
1048 bundle = exchange.readbundle(ui, fh, bundlepath)
1098 bundle = exchange.readbundle(ui, fh, bundlepath)
1049 for part in bundle.iterparts():
1099 for part in bundle.iterparts():
1050 while part.read(size):
1100 while part.read(size):
1051 pass
1101 pass
1052
1102
1053 return run
1103 return run
1054
1104
1055 benches = [
1105 benches = [
1056 (makestdioread(8192), b'read(8k)'),
1106 (makestdioread(8192), b'read(8k)'),
1057 (makestdioread(16384), b'read(16k)'),
1107 (makestdioread(16384), b'read(16k)'),
1058 (makestdioread(32768), b'read(32k)'),
1108 (makestdioread(32768), b'read(32k)'),
1059 (makestdioread(131072), b'read(128k)'),
1109 (makestdioread(131072), b'read(128k)'),
1060 ]
1110 ]
1061
1111
1062 with open(bundlepath, b'rb') as fh:
1112 with open(bundlepath, b'rb') as fh:
1063 bundle = exchange.readbundle(ui, fh, bundlepath)
1113 bundle = exchange.readbundle(ui, fh, bundlepath)
1064
1114
1065 if isinstance(bundle, changegroup.cg1unpacker):
1115 if isinstance(bundle, changegroup.cg1unpacker):
1066 benches.extend(
1116 benches.extend(
1067 [
1117 [
1068 (makebench(deltaiter), b'cg1 deltaiter()'),
1118 (makebench(deltaiter), b'cg1 deltaiter()'),
1069 (makebench(iterchunks), b'cg1 getchunks()'),
1119 (makebench(iterchunks), b'cg1 getchunks()'),
1070 (makereadnbytes(8192), b'cg1 read(8k)'),
1120 (makereadnbytes(8192), b'cg1 read(8k)'),
1071 (makereadnbytes(16384), b'cg1 read(16k)'),
1121 (makereadnbytes(16384), b'cg1 read(16k)'),
1072 (makereadnbytes(32768), b'cg1 read(32k)'),
1122 (makereadnbytes(32768), b'cg1 read(32k)'),
1073 (makereadnbytes(131072), b'cg1 read(128k)'),
1123 (makereadnbytes(131072), b'cg1 read(128k)'),
1074 ]
1124 ]
1075 )
1125 )
1076 elif isinstance(bundle, bundle2.unbundle20):
1126 elif isinstance(bundle, bundle2.unbundle20):
1077 benches.extend(
1127 benches.extend(
1078 [
1128 [
1079 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1129 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1080 (makebench(iterparts), b'bundle2 iterparts()'),
1130 (makebench(iterparts), b'bundle2 iterparts()'),
1081 (
1131 (
1082 makebench(iterpartsseekable),
1132 makebench(iterpartsseekable),
1083 b'bundle2 iterparts() seekable',
1133 b'bundle2 iterparts() seekable',
1084 ),
1134 ),
1085 (makebench(seek), b'bundle2 part seek()'),
1135 (makebench(seek), b'bundle2 part seek()'),
1086 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1136 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1087 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1137 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1088 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1138 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1089 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1139 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1090 ]
1140 ]
1091 )
1141 )
1092 elif isinstance(bundle, streamclone.streamcloneapplier):
1142 elif isinstance(bundle, streamclone.streamcloneapplier):
1093 raise error.Abort(b'stream clone bundles not supported')
1143 raise error.Abort(b'stream clone bundles not supported')
1094 else:
1144 else:
1095 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1145 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1096
1146
1097 for fn, title in benches:
1147 for fn, title in benches:
1098 timer, fm = gettimer(ui, opts)
1148 timer, fm = gettimer(ui, opts)
1099 timer(fn, title=title)
1149 timer(fn, title=title)
1100 fm.end()
1150 fm.end()
1101
1151
1102
1152
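# A minimal way to exercise perf::bundleread (the path and bundle are
# illustrative): create a bundle first, then point the benchmark at it.
#
#   $ hg bundle --all /tmp/repo-all.hg
#   $ hg perf::bundleread /tmp/repo-all.hg
#
# The reported timings separate plain stdio read() calls from the cg1 and
# bundle2 specific iteration primitives benchmarked above.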
1103 @command(
1153 @command(
1104 b'perf::changegroupchangelog|perfchangegroupchangelog',
1154 b'perf::changegroupchangelog|perfchangegroupchangelog',
1105 formatteropts
1155 formatteropts
1106 + [
1156 + [
1107 (b'', b'cgversion', b'02', b'changegroup version'),
1157 (b'', b'cgversion', b'02', b'changegroup version'),
1108 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1158 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1109 ],
1159 ],
1110 )
1160 )
1111 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1161 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1112 """Benchmark producing a changelog group for a changegroup.
1162 """Benchmark producing a changelog group for a changegroup.
1113
1163
1114 This measures the time spent processing the changelog during a
1164 This measures the time spent processing the changelog during a
1115 bundle operation. This occurs during `hg bundle` and on a server
1165 bundle operation. This occurs during `hg bundle` and on a server
1116 processing a `getbundle` wire protocol request (handles clones
1166 processing a `getbundle` wire protocol request (handles clones
1117 and pull requests).
1167 and pull requests).
1118
1168
1119 By default, all revisions are added to the changegroup.
1169 By default, all revisions are added to the changegroup.
1120 """
1170 """
1121 opts = _byteskwargs(opts)
1171 opts = _byteskwargs(opts)
1122 cl = repo.changelog
1172 cl = repo.changelog
1123 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1173 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1124 bundler = changegroup.getbundler(cgversion, repo)
1174 bundler = changegroup.getbundler(cgversion, repo)
1125
1175
1126 def d():
1176 def d():
1127 state, chunks = bundler._generatechangelog(cl, nodes)
1177 state, chunks = bundler._generatechangelog(cl, nodes)
1128 for chunk in chunks:
1178 for chunk in chunks:
1129 pass
1179 pass
1130
1180
1131 timer, fm = gettimer(ui, opts)
1181 timer, fm = gettimer(ui, opts)
1132
1182
1133 # Terminal printing can interfere with timing. So disable it.
1183 # Terminal printing can interfere with timing. So disable it.
1134 with ui.configoverride({(b'progress', b'disable'): True}):
1184 with ui.configoverride({(b'progress', b'disable'): True}):
1135 timer(d)
1185 timer(d)
1136
1186
1137 fm.end()
1187 fm.end()
1138
1188
1139
1189
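# Illustrative invocation of the changelog-group benchmark above (the revset
# is a made-up example); --cgversion and --rev map to the options declared on
# the command:
#
#   $ hg perf::changegroupchangelog --cgversion 02 --rev '-10000:'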
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()
1156
1206
1157
1207
1158 @command(
1208 @command(
1159 b'perf::dirstate|perfdirstate',
1209 b'perf::dirstate|perfdirstate',
1160 [
1210 [
1161 (
1211 (
1162 b'',
1212 b'',
1163 b'iteration',
1213 b'iteration',
1164 None,
1214 None,
1165 b'benchmark a full iteration for the dirstate',
1215 b'benchmark a full iteration for the dirstate',
1166 ),
1216 ),
1167 (
1217 (
1168 b'',
1218 b'',
1169 b'contains',
1219 b'contains',
1170 None,
1220 None,
1171 b'benchmark a large amount of `nf in dirstate` calls',
1221 b'benchmark a large amount of `nf in dirstate` calls',
1172 ),
1222 ),
1173 ]
1223 ]
1174 + formatteropts,
1224 + formatteropts,
1175 )
1225 )
1176 def perfdirstate(ui, repo, **opts):
1226 def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
1183 opts = _byteskwargs(opts)
1233 opts = _byteskwargs(opts)
1184 timer, fm = gettimer(ui, opts)
1234 timer, fm = gettimer(ui, opts)
1185 b"a" in repo.dirstate
1235 b"a" in repo.dirstate
1186
1236
1187 if opts[b'iteration'] and opts[b'contains']:
1237 if opts[b'iteration'] and opts[b'contains']:
1188 msg = b'only specify one of --iteration or --contains'
1238 msg = b'only specify one of --iteration or --contains'
1189 raise error.Abort(msg)
1239 raise error.Abort(msg)
1190
1240
1191 if opts[b'iteration']:
1241 if opts[b'iteration']:
1192 setup = None
1242 setup = None
1193 dirstate = repo.dirstate
1243 dirstate = repo.dirstate
1194
1244
1195 def d():
1245 def d():
1196 for f in dirstate:
1246 for f in dirstate:
1197 pass
1247 pass
1198
1248
1199 elif opts[b'contains']:
1249 elif opts[b'contains']:
1200 setup = None
1250 setup = None
1201 dirstate = repo.dirstate
1251 dirstate = repo.dirstate
1202 allfiles = list(dirstate)
1252 allfiles = list(dirstate)
1203 # also add file path that will be "missing" from the dirstate
1253 # also add file path that will be "missing" from the dirstate
1204 allfiles.extend([f[::-1] for f in allfiles])
1254 allfiles.extend([f[::-1] for f in allfiles])
1205
1255
1206 def d():
1256 def d():
1207 for f in allfiles:
1257 for f in allfiles:
1208 f in dirstate
1258 f in dirstate
1209
1259
1210 else:
1260 else:
1211
1261
1212 def setup():
1262 def setup():
1213 repo.dirstate.invalidate()
1263 repo.dirstate.invalidate()
1214
1264
1215 def d():
1265 def d():
1216 b"a" in repo.dirstate
1266 b"a" in repo.dirstate
1217
1267
1218 timer(d, setup=setup)
1268 timer(d, setup=setup)
1219 fm.end()
1269 fm.end()
1220
1270
1221
1271
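# The three modes of perf::dirstate measured above, as illustrative
# invocations (with no flag, the cold "contains" load is benchmarked):
#
#   $ hg perf::dirstate              # load from scratch until 'x in dirstate' can answer
#   $ hg perf::dirstate --iteration  # full iteration over every tracked file
#   $ hg perf::dirstate --contains   # many membership checks, half of them guaranteed misses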
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")

    def setup():
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1240
1290
1241
1291
1242 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1292 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1243 def perfdirstatefoldmap(ui, repo, **opts):
1293 def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
1248 opts = _byteskwargs(opts)
1298 opts = _byteskwargs(opts)
1249 timer, fm = gettimer(ui, opts)
1299 timer, fm = gettimer(ui, opts)
1250 dirstate = repo.dirstate
1300 dirstate = repo.dirstate
1251 dirstate._map.filefoldmap.get(b'a')
1301 dirstate._map.filefoldmap.get(b'a')
1252
1302
1253 def setup():
1303 def setup():
1254 del dirstate._map.filefoldmap
1304 del dirstate._map.filefoldmap
1255
1305
1256 def d():
1306 def d():
1257 dirstate._map.filefoldmap.get(b'a')
1307 dirstate._map.filefoldmap.get(b'a')
1258
1308
1259 timer(d, setup=setup)
1309 timer(d, setup=setup)
1260 fm.end()
1310 fm.end()
1261
1311
1262
1312
1263 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1313 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1264 def perfdirfoldmap(ui, repo, **opts):
1314 def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
1269 opts = _byteskwargs(opts)
1319 opts = _byteskwargs(opts)
1270 timer, fm = gettimer(ui, opts)
1320 timer, fm = gettimer(ui, opts)
1271 dirstate = repo.dirstate
1321 dirstate = repo.dirstate
1272 dirstate._map.dirfoldmap.get(b'a')
1322 dirstate._map.dirfoldmap.get(b'a')
1273
1323
1274 def setup():
1324 def setup():
1275 del dirstate._map.dirfoldmap
1325 del dirstate._map.dirfoldmap
1276 try:
1326 try:
1277 del dirstate._map._dirs
1327 del dirstate._map._dirs
1278 except AttributeError:
1328 except AttributeError:
1279 pass
1329 pass
1280
1330
1281 def d():
1331 def d():
1282 dirstate._map.dirfoldmap.get(b'a')
1332 dirstate._map.dirfoldmap.get(b'a')
1283
1333
1284 timer(d, setup=setup)
1334 timer(d, setup=setup)
1285 fm.end()
1335 fm.end()
1286
1336
1287
1337
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds

    def setup():
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1304
1354
1305
1355
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1327
1377
1328
1378
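# How _getmergerevs above maps options to contexts: `--rev` selects the
# "other" side, `--from` replaces the working copy as the "local" side, and
# `--base` overrides the ancestor otherwise computed with wctx.ancestor(rctx).
# Illustrative use with the merge benchmarks below (revisions and branch
# names are hypothetical):
#
#   $ hg perf::mergecalculate --from default --rev stable --base 'ancestor(default, stable)'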
1329 @command(
1379 @command(
1330 b'perf::mergecalculate|perfmergecalculate',
1380 b'perf::mergecalculate|perfmergecalculate',
1331 [
1381 [
1332 (b'r', b'rev', b'.', b'rev to merge against'),
1382 (b'r', b'rev', b'.', b'rev to merge against'),
1333 (b'', b'from', b'', b'rev to merge from'),
1383 (b'', b'from', b'', b'rev to merge from'),
1334 (b'', b'base', b'', b'the revision to use as base'),
1384 (b'', b'base', b'', b'the revision to use as base'),
1335 ]
1385 ]
1336 + formatteropts,
1386 + formatteropts,
1337 )
1387 )
1338 def perfmergecalculate(ui, repo, **opts):
1388 def perfmergecalculate(ui, repo, **opts):
1339 opts = _byteskwargs(opts)
1389 opts = _byteskwargs(opts)
1340 timer, fm = gettimer(ui, opts)
1390 timer, fm = gettimer(ui, opts)
1341
1391
1342 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1392 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1343
1393
1344 def d():
1394 def d():
1345 # acceptremote is True because we don't want prompts in the middle of
1395 # acceptremote is True because we don't want prompts in the middle of
1346 # our benchmark
1396 # our benchmark
1347 merge.calculateupdates(
1397 merge.calculateupdates(
1348 repo,
1398 repo,
1349 wctx,
1399 wctx,
1350 rctx,
1400 rctx,
1351 [ancestor],
1401 [ancestor],
1352 branchmerge=False,
1402 branchmerge=False,
1353 force=False,
1403 force=False,
1354 acceptremote=True,
1404 acceptremote=True,
1355 followcopies=True,
1405 followcopies=True,
1356 )
1406 )
1357
1407
1358 timer(d)
1408 timer(d)
1359 fm.end()
1409 fm.end()
1360
1410
1361
1411
1362 @command(
1412 @command(
1363 b'perf::mergecopies|perfmergecopies',
1413 b'perf::mergecopies|perfmergecopies',
1364 [
1414 [
1365 (b'r', b'rev', b'.', b'rev to merge against'),
1415 (b'r', b'rev', b'.', b'rev to merge against'),
1366 (b'', b'from', b'', b'rev to merge from'),
1416 (b'', b'from', b'', b'rev to merge from'),
1367 (b'', b'base', b'', b'the revision to use as base'),
1417 (b'', b'base', b'', b'the revision to use as base'),
1368 ]
1418 ]
1369 + formatteropts,
1419 + formatteropts,
1370 )
1420 )
1371 def perfmergecopies(ui, repo, **opts):
1421 def perfmergecopies(ui, repo, **opts):
1372 """measure runtime of `copies.mergecopies`"""
1422 """measure runtime of `copies.mergecopies`"""
1373 opts = _byteskwargs(opts)
1423 opts = _byteskwargs(opts)
1374 timer, fm = gettimer(ui, opts)
1424 timer, fm = gettimer(ui, opts)
1375 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1425 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1376
1426
1377 def d():
1427 def d():
1378 # acceptremote is True because we don't want prompts in the middle of
1428 # acceptremote is True because we don't want prompts in the middle of
1379 # our benchmark
1429 # our benchmark
1380 copies.mergecopies(repo, wctx, rctx, ancestor)
1430 copies.mergecopies(repo, wctx, rctx, ancestor)
1381
1431
1382 timer(d)
1432 timer(d)
1383 fm.end()
1433 fm.end()
1384
1434
1385
1435
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1399
1449
1400
1450
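# perf::pathcopies takes its two endpoints positionally; an illustrative run
# over a hypothetical range of 1000 changesets:
#
#   $ hg perf::pathcopies 'tip~1000' tip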
1401 @command(
1451 @command(
1402 b'perf::phases|perfphases',
1452 b'perf::phases|perfphases',
1403 [
1453 [
1404 (b'', b'full', False, b'include file reading time too'),
1454 (b'', b'full', False, b'include file reading time too'),
1405 ],
1455 ],
1406 b"",
1456 b"",
1407 )
1457 )
1408 def perfphases(ui, repo, **opts):
1458 def perfphases(ui, repo, **opts):
1409 """benchmark phasesets computation"""
1459 """benchmark phasesets computation"""
1410 opts = _byteskwargs(opts)
1460 opts = _byteskwargs(opts)
1411 timer, fm = gettimer(ui, opts)
1461 timer, fm = gettimer(ui, opts)
1412 _phases = repo._phasecache
1462 _phases = repo._phasecache
1413 full = opts.get(b'full')
1463 full = opts.get(b'full')
1414
1464
1415 def d():
1465 def d():
1416 phases = _phases
1466 phases = _phases
1417 if full:
1467 if full:
1418 clearfilecache(repo, b'_phasecache')
1468 clearfilecache(repo, b'_phasecache')
1419 phases = repo._phasecache
1469 phases = repo._phasecache
1420 phases.invalidate()
1470 phases.invalidate()
1421 phases.loadphaserevs(repo)
1471 phases.loadphaserevs(repo)
1422
1472
1423 timer(d)
1473 timer(d)
1424 fm.end()
1474 fm.end()
1425
1475
1426
1476
1427 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1477 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1428 def perfphasesremote(ui, repo, dest=None, **opts):
1478 def perfphasesremote(ui, repo, dest=None, **opts):
1429 """benchmark time needed to analyse phases of the remote server"""
1479 """benchmark time needed to analyse phases of the remote server"""
1430 from mercurial.node import bin
1480 from mercurial.node import bin
1431 from mercurial import (
1481 from mercurial import (
1432 exchange,
1482 exchange,
1433 hg,
1483 hg,
1434 phases,
1484 phases,
1435 )
1485 )
1436
1486
1437 opts = _byteskwargs(opts)
1487 opts = _byteskwargs(opts)
1438 timer, fm = gettimer(ui, opts)
1488 timer, fm = gettimer(ui, opts)
1439
1489
1440 path = ui.getpath(dest, default=(b'default-push', b'default'))
1490 path = ui.getpath(dest, default=(b'default-push', b'default'))
1441 if not path:
1491 if not path:
1442 raise error.Abort(
1492 raise error.Abort(
1443 b'default repository not configured!',
1493 b'default repository not configured!',
1444 hint=b"see 'hg help config.paths'",
1494 hint=b"see 'hg help config.paths'",
1445 )
1495 )
1446 dest = path.pushloc or path.loc
1496 dest = path.pushloc or path.loc
1447 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1497 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1448 other = hg.peer(repo, opts, dest)
1498 other = hg.peer(repo, opts, dest)
1449
1499
1450 # easier to perform discovery through the operation
1500 # easier to perform discovery through the operation
1451 op = exchange.pushoperation(repo, other)
1501 op = exchange.pushoperation(repo, other)
1452 exchange._pushdiscoverychangeset(op)
1502 exchange._pushdiscoverychangeset(op)
1453
1503
1454 remotesubset = op.fallbackheads
1504 remotesubset = op.fallbackheads
1455
1505
1456 with other.commandexecutor() as e:
1506 with other.commandexecutor() as e:
1457 remotephases = e.callcommand(
1507 remotephases = e.callcommand(
1458 b'listkeys', {b'namespace': b'phases'}
1508 b'listkeys', {b'namespace': b'phases'}
1459 ).result()
1509 ).result()
1460 del other
1510 del other
1461 publishing = remotephases.get(b'publishing', False)
1511 publishing = remotephases.get(b'publishing', False)
1462 if publishing:
1512 if publishing:
1463 ui.statusnoi18n(b'publishing: yes\n')
1513 ui.statusnoi18n(b'publishing: yes\n')
1464 else:
1514 else:
1465 ui.statusnoi18n(b'publishing: no\n')
1515 ui.statusnoi18n(b'publishing: no\n')
1466
1516
1467 has_node = getattr(repo.changelog.index, 'has_node', None)
1517 has_node = getattr(repo.changelog.index, 'has_node', None)
1468 if has_node is None:
1518 if has_node is None:
1469 has_node = repo.changelog.nodemap.__contains__
1519 has_node = repo.changelog.nodemap.__contains__
1470 nonpublishroots = 0
1520 nonpublishroots = 0
1471 for nhex, phase in remotephases.iteritems():
1521 for nhex, phase in remotephases.iteritems():
1472 if nhex == b'publishing': # ignore data related to publish option
1522 if nhex == b'publishing': # ignore data related to publish option
1473 continue
1523 continue
1474 node = bin(nhex)
1524 node = bin(nhex)
1475 if has_node(node) and int(phase):
1525 if has_node(node) and int(phase):
1476 nonpublishroots += 1
1526 nonpublishroots += 1
1477 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1527 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1478 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1528 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1479
1529
1480 def d():
1530 def d():
1481 phases.remotephasessummary(repo, remotesubset, remotephases)
1531 phases.remotephasessummary(repo, remotesubset, remotephases)
1482
1532
1483 timer(d)
1533 timer(d)
1484 fm.end()
1534 fm.end()
1485
1535
1486
1536
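# Illustrative invocations of the two phase benchmarks above (the URL is a
# hypothetical example). --full also accounts for re-reading the phase data
# from disk; perf::phasesremote needs a reachable peer:
#
#   $ hg perf::phases --full
#   $ hg perf::phasesremote https://example.com/repo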
1487 @command(
1537 @command(
1488 b'perf::manifest|perfmanifest',
1538 b'perf::manifest|perfmanifest',
1489 [
1539 [
1490 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1540 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1491 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1541 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1492 ]
1542 ]
1493 + formatteropts,
1543 + formatteropts,
1494 b'REV|NODE',
1544 b'REV|NODE',
1495 )
1545 )
1496 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1546 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1497 """benchmark the time to read a manifest from disk and return a usable
1547 """benchmark the time to read a manifest from disk and return a usable
1498 dict-like object
1548 dict-like object
1499
1549
1500 Manifest caches are cleared before retrieval."""
1550 Manifest caches are cleared before retrieval."""
1501 opts = _byteskwargs(opts)
1551 opts = _byteskwargs(opts)
1502 timer, fm = gettimer(ui, opts)
1552 timer, fm = gettimer(ui, opts)
1503 if not manifest_rev:
1553 if not manifest_rev:
1504 ctx = scmutil.revsingle(repo, rev, rev)
1554 ctx = scmutil.revsingle(repo, rev, rev)
1505 t = ctx.manifestnode()
1555 t = ctx.manifestnode()
1506 else:
1556 else:
1507 from mercurial.node import bin
1557 from mercurial.node import bin
1508
1558
1509 if len(rev) == 40:
1559 if len(rev) == 40:
1510 t = bin(rev)
1560 t = bin(rev)
1511 else:
1561 else:
1512 try:
1562 try:
1513 rev = int(rev)
1563 rev = int(rev)
1514
1564
1515 if util.safehasattr(repo.manifestlog, b'getstorage'):
1565 if util.safehasattr(repo.manifestlog, b'getstorage'):
1516 t = repo.manifestlog.getstorage(b'').node(rev)
1566 t = repo.manifestlog.getstorage(b'').node(rev)
1517 else:
1567 else:
1518 t = repo.manifestlog._revlog.lookup(rev)
1568 t = repo.manifestlog._revlog.lookup(rev)
1519 except ValueError:
1569 except ValueError:
1520 raise error.Abort(
1570 raise error.Abort(
1521 b'manifest revision must be integer or full node'
1571 b'manifest revision must be integer or full node'
1522 )
1572 )
1523
1573
1524 def d():
1574 def d():
1525 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1575 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1526 repo.manifestlog[t].read()
1576 repo.manifestlog[t].read()
1527
1577
1528 timer(d)
1578 timer(d)
1529 fm.end()
1579 fm.end()
1530
1580
1531
1581
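# perf::manifest accepts either a changeset revision or, with --manifest-rev,
# a manifest revision or full node directly; --clear-disk additionally drops
# the persisted manifest caches. Illustrative runs (revisions are examples
# only):
#
#   $ hg perf::manifest tip
#   $ hg perf::manifest --manifest-rev 0
#   $ hg perf::manifest --clear-disk tip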
1532 @command(b'perf::changeset|perfchangeset', formatteropts)
1582 @command(b'perf::changeset|perfchangeset', formatteropts)
1533 def perfchangeset(ui, repo, rev, **opts):
1583 def perfchangeset(ui, repo, rev, **opts):
1534 opts = _byteskwargs(opts)
1584 opts = _byteskwargs(opts)
1535 timer, fm = gettimer(ui, opts)
1585 timer, fm = gettimer(ui, opts)
1536 n = scmutil.revsingle(repo, rev).node()
1586 n = scmutil.revsingle(repo, rev).node()
1537
1587
1538 def d():
1588 def d():
1539 repo.changelog.read(n)
1589 repo.changelog.read(n)
1540 # repo.changelog._cache = None
1590 # repo.changelog._cache = None
1541
1591
1542 timer(d)
1592 timer(d)
1543 fm.end()
1593 fm.end()
1544
1594
1545
1595
1546 @command(b'perf::ignore|perfignore', formatteropts)
1596 @command(b'perf::ignore|perfignore', formatteropts)
1547 def perfignore(ui, repo, **opts):
1597 def perfignore(ui, repo, **opts):
1548 """benchmark operation related to computing ignore"""
1598 """benchmark operation related to computing ignore"""
1549 opts = _byteskwargs(opts)
1599 opts = _byteskwargs(opts)
1550 timer, fm = gettimer(ui, opts)
1600 timer, fm = gettimer(ui, opts)
1551 dirstate = repo.dirstate
1601 dirstate = repo.dirstate
1552
1602
1553 def setupone():
1603 def setupone():
1554 dirstate.invalidate()
1604 dirstate.invalidate()
1555 clearfilecache(dirstate, b'_ignore')
1605 clearfilecache(dirstate, b'_ignore')
1556
1606
1557 def runone():
1607 def runone():
1558 dirstate._ignore
1608 dirstate._ignore
1559
1609
1560 timer(runone, setup=setupone, title=b"load")
1610 timer(runone, setup=setupone, title=b"load")
1561 fm.end()
1611 fm.end()
1562
1612
1563
1613
1564 @command(
1614 @command(
1565 b'perf::index|perfindex',
1615 b'perf::index|perfindex',
1566 [
1616 [
1567 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1617 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1568 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1618 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1569 ]
1619 ]
1570 + formatteropts,
1620 + formatteropts,
1571 )
1621 )
1572 def perfindex(ui, repo, **opts):
1622 def perfindex(ui, repo, **opts):
1573 """benchmark index creation time followed by a lookup
1623 """benchmark index creation time followed by a lookup
1574
1624
    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of revisions looked up and their order can also
    matter.
1580
1630
1581 Example of useful set to test:
1631 Example of useful set to test:
1582
1632
1583 * tip
1633 * tip
1584 * 0
1634 * 0
1585 * -10:
1635 * -10:
1586 * :10
1636 * :10
1587 * -10: + :10
1637 * -10: + :10
1588 * :10: + -10:
1638 * :10: + -10:
1589 * -10000:
1639 * -10000:
1590 * -10000: + 0
1640 * -10000: + 0
1591
1641
    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
1594 import mercurial.revlog
1644 import mercurial.revlog
1595
1645
1596 opts = _byteskwargs(opts)
1646 opts = _byteskwargs(opts)
1597 timer, fm = gettimer(ui, opts)
1647 timer, fm = gettimer(ui, opts)
1598 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1648 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1599 if opts[b'no_lookup']:
1649 if opts[b'no_lookup']:
1600 if opts['rev']:
1650 if opts['rev']:
1601 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1651 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1602 nodes = []
1652 nodes = []
1603 elif not opts[b'rev']:
1653 elif not opts[b'rev']:
1604 nodes = [repo[b"tip"].node()]
1654 nodes = [repo[b"tip"].node()]
1605 else:
1655 else:
1606 revs = scmutil.revrange(repo, opts[b'rev'])
1656 revs = scmutil.revrange(repo, opts[b'rev'])
1607 cl = repo.changelog
1657 cl = repo.changelog
1608 nodes = [cl.node(r) for r in revs]
1658 nodes = [cl.node(r) for r in revs]
1609
1659
1610 unfi = repo.unfiltered()
1660 unfi = repo.unfiltered()
1611 # find the filecache func directly
1661 # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
1613 makecl = unfi.__class__.changelog.func
1663 makecl = unfi.__class__.changelog.func
1614
1664
1615 def setup():
1665 def setup():
1616 # probably not necessary, but for good measure
1666 # probably not necessary, but for good measure
1617 clearchangelog(unfi)
1667 clearchangelog(unfi)
1618
1668
1619 def d():
1669 def d():
1620 cl = makecl(unfi)
1670 cl = makecl(unfi)
1621 for n in nodes:
1671 for n in nodes:
1622 cl.rev(n)
1672 cl.rev(n)
1623
1673
1624 timer(d, setup=setup)
1674 timer(d, setup=setup)
1625 fm.end()
1675 fm.end()
1626
1676
1627
1677
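# The revision sets suggested in the perf::index docstring translate into
# repeated --rev arguments; two illustrative invocations:
#
#   $ hg perf::index --rev tip
#   $ hg perf::index --rev '-10:' --rev ':10'
#
# and --no-lookup benchmarks index construction alone.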
1628 @command(
1678 @command(
1629 b'perf::nodemap|perfnodemap',
1679 b'perf::nodemap|perfnodemap',
1630 [
1680 [
1631 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1681 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1632 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1682 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1633 ]
1683 ]
1634 + formatteropts,
1684 + formatteropts,
1635 )
1685 )
1636 def perfnodemap(ui, repo, **opts):
1686 def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the number and order of revisions we look
    up can vary. Examples of useful sets to test:
1641 * tip
1691 * tip
1642 * 0
1692 * 0
1643 * -10:
1693 * -10:
1644 * :10
1694 * :10
1645 * -10: + :10
1695 * -10: + :10
1646 * :10: + -10:
1696 * :10: + -10:
1647 * -10000:
1697 * -10000:
1648 * -10000: + 0
1698 * -10000: + 0
1649
1699
    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
1652 """
1702 """
1653 import mercurial.revlog
1703 import mercurial.revlog
1654
1704
1655 opts = _byteskwargs(opts)
1705 opts = _byteskwargs(opts)
1656 timer, fm = gettimer(ui, opts)
1706 timer, fm = gettimer(ui, opts)
1657 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1707 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1658
1708
1659 unfi = repo.unfiltered()
1709 unfi = repo.unfiltered()
1660 clearcaches = opts[b'clear_caches']
1710 clearcaches = opts[b'clear_caches']
1661 # find the filecache func directly
1711 # find the filecache func directly
1662 # This avoid polluting the benchmark with the filecache logic
1712 # This avoid polluting the benchmark with the filecache logic
1663 makecl = unfi.__class__.changelog.func
1713 makecl = unfi.__class__.changelog.func
1664 if not opts[b'rev']:
1714 if not opts[b'rev']:
1665 raise error.Abort(b'use --rev to specify revisions to look up')
1715 raise error.Abort(b'use --rev to specify revisions to look up')
1666 revs = scmutil.revrange(repo, opts[b'rev'])
1716 revs = scmutil.revrange(repo, opts[b'rev'])
1667 cl = repo.changelog
1717 cl = repo.changelog
1668 nodes = [cl.node(r) for r in revs]
1718 nodes = [cl.node(r) for r in revs]
1669
1719
1670 # use a list to pass reference to a nodemap from one closure to the next
1720 # use a list to pass reference to a nodemap from one closure to the next
1671 nodeget = [None]
1721 nodeget = [None]
1672
1722
1673 def setnodeget():
1723 def setnodeget():
1674 # probably not necessary, but for good measure
1724 # probably not necessary, but for good measure
1675 clearchangelog(unfi)
1725 clearchangelog(unfi)
1676 cl = makecl(unfi)
1726 cl = makecl(unfi)
1677 if util.safehasattr(cl.index, 'get_rev'):
1727 if util.safehasattr(cl.index, 'get_rev'):
1678 nodeget[0] = cl.index.get_rev
1728 nodeget[0] = cl.index.get_rev
1679 else:
1729 else:
1680 nodeget[0] = cl.nodemap.get
1730 nodeget[0] = cl.nodemap.get
1681
1731
1682 def d():
1732 def d():
1683 get = nodeget[0]
1733 get = nodeget[0]
1684 for n in nodes:
1734 for n in nodes:
1685 get(n)
1735 get(n)
1686
1736
1687 setup = None
1737 setup = None
1688 if clearcaches:
1738 if clearcaches:
1689
1739
1690 def setup():
1740 def setup():
1691 setnodeget()
1741 setnodeget()
1692
1742
1693 else:
1743 else:
1694 setnodeget()
1744 setnodeget()
1695 d() # prewarm the data structure
1745 d() # prewarm the data structure
1696 timer(d, setup=setup)
1746 timer(d, setup=setup)
1697 fm.end()
1747 fm.end()
1698
1748
1699
1749
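# perf::nodemap requires an explicit --rev; an illustrative invocation
# looking up the 10000 most recent revisions from a cold nodemap:
#
#   $ hg perf::nodemap --rev '-10000:'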
1700 @command(b'perf::startup|perfstartup', formatteropts)
1750 @command(b'perf::startup|perfstartup', formatteropts)
1701 def perfstartup(ui, repo, **opts):
1751 def perfstartup(ui, repo, **opts):
1702 opts = _byteskwargs(opts)
1752 opts = _byteskwargs(opts)
1703 timer, fm = gettimer(ui, opts)
1753 timer, fm = gettimer(ui, opts)
1704
1754
1705 def d():
1755 def d():
1706 if os.name != 'nt':
1756 if os.name != 'nt':
1707 os.system(
1757 os.system(
1708 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1758 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1709 )
1759 )
1710 else:
1760 else:
1711 os.environ['HGRCPATH'] = r' '
1761 os.environ['HGRCPATH'] = r' '
1712 os.system("%s version -q > NUL" % sys.argv[0])
1762 os.system("%s version -q > NUL" % sys.argv[0])
1713
1763
1714 timer(d)
1764 timer(d)
1715 fm.end()
1765 fm.end()
1716
1766
1717
1767
1718 @command(b'perf::parents|perfparents', formatteropts)
1768 @command(b'perf::parents|perfparents', formatteropts)
1719 def perfparents(ui, repo, **opts):
1769 def perfparents(ui, repo, **opts):
1720 """benchmark the time necessary to fetch one changeset's parents.
1770 """benchmark the time necessary to fetch one changeset's parents.
1721
1771
1722 The fetch is done using the `node identifier`, traversing all object layers
1772 The fetch is done using the `node identifier`, traversing all object layers
1723 from the repository object. The first N revisions will be used for this
1773 from the repository object. The first N revisions will be used for this
1724 benchmark. N is controlled by the ``perf.parentscount`` config option
1774 benchmark. N is controlled by the ``perf.parentscount`` config option
1725 (default: 1000).
1775 (default: 1000).
1726 """
1776 """
1727 opts = _byteskwargs(opts)
1777 opts = _byteskwargs(opts)
1728 timer, fm = gettimer(ui, opts)
1778 timer, fm = gettimer(ui, opts)
1729 # control the number of commits perfparents iterates over
1779 # control the number of commits perfparents iterates over
1730 # experimental config: perf.parentscount
1780 # experimental config: perf.parentscount
1731 count = getint(ui, b"perf", b"parentscount", 1000)
1781 count = getint(ui, b"perf", b"parentscount", 1000)
1732 if len(repo.changelog) < count:
1782 if len(repo.changelog) < count:
1733 raise error.Abort(b"repo needs %d commits for this test" % count)
1783 raise error.Abort(b"repo needs %d commits for this test" % count)
1734 repo = repo.unfiltered()
1784 repo = repo.unfiltered()
1735 nl = [repo.changelog.node(i) for i in _xrange(count)]
1785 nl = [repo.changelog.node(i) for i in _xrange(count)]
1736
1786
1737 def d():
1787 def d():
1738 for n in nl:
1788 for n in nl:
1739 repo.changelog.parents(n)
1789 repo.changelog.parents(n)
1740
1790
1741 timer(d)
1791 timer(d)
1742 fm.end()
1792 fm.end()
1743
1793
1744
1794
1745 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1795 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1746 def perfctxfiles(ui, repo, x, **opts):
1796 def perfctxfiles(ui, repo, x, **opts):
1747 opts = _byteskwargs(opts)
1797 opts = _byteskwargs(opts)
1748 x = int(x)
1798 x = int(x)
1749 timer, fm = gettimer(ui, opts)
1799 timer, fm = gettimer(ui, opts)
1750
1800
1751 def d():
1801 def d():
1752 len(repo[x].files())
1802 len(repo[x].files())
1753
1803
1754 timer(d)
1804 timer(d)
1755 fm.end()
1805 fm.end()
1756
1806
1757
1807
1758 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1808 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1759 def perfrawfiles(ui, repo, x, **opts):
1809 def perfrawfiles(ui, repo, x, **opts):
1760 opts = _byteskwargs(opts)
1810 opts = _byteskwargs(opts)
1761 x = int(x)
1811 x = int(x)
1762 timer, fm = gettimer(ui, opts)
1812 timer, fm = gettimer(ui, opts)
1763 cl = repo.changelog
1813 cl = repo.changelog
1764
1814
1765 def d():
1815 def d():
1766 len(cl.read(x)[3])
1816 len(cl.read(x)[3])
1767
1817
1768 timer(d)
1818 timer(d)
1769 fm.end()
1819 fm.end()
1770
1820
1771
1821
1772 @command(b'perf::lookup|perflookup', formatteropts)
1822 @command(b'perf::lookup|perflookup', formatteropts)
1773 def perflookup(ui, repo, rev, **opts):
1823 def perflookup(ui, repo, rev, **opts):
1774 opts = _byteskwargs(opts)
1824 opts = _byteskwargs(opts)
1775 timer, fm = gettimer(ui, opts)
1825 timer, fm = gettimer(ui, opts)
1776 timer(lambda: len(repo.lookup(rev)))
1826 timer(lambda: len(repo.lookup(rev)))
1777 fm.end()
1827 fm.end()
1778
1828
1779
1829
1780 @command(
1830 @command(
1781 b'perf::linelogedits|perflinelogedits',
1831 b'perf::linelogedits|perflinelogedits',
1782 [
1832 [
1783 (b'n', b'edits', 10000, b'number of edits'),
1833 (b'n', b'edits', 10000, b'number of edits'),
1784 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1834 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1785 ],
1835 ],
1786 norepo=True,
1836 norepo=True,
1787 )
1837 )
1788 def perflinelogedits(ui, **opts):
1838 def perflinelogedits(ui, **opts):
1789 from mercurial import linelog
1839 from mercurial import linelog
1790
1840
1791 opts = _byteskwargs(opts)
1841 opts = _byteskwargs(opts)
1792
1842
1793 edits = opts[b'edits']
1843 edits = opts[b'edits']
1794 maxhunklines = opts[b'max_hunk_lines']
1844 maxhunklines = opts[b'max_hunk_lines']
1795
1845
1796 maxb1 = 100000
1846 maxb1 = 100000
1797 random.seed(0)
1847 random.seed(0)
1798 randint = random.randint
1848 randint = random.randint
1799 currentlines = 0
1849 currentlines = 0
1800 arglist = []
1850 arglist = []
1801 for rev in _xrange(edits):
1851 for rev in _xrange(edits):
1802 a1 = randint(0, currentlines)
1852 a1 = randint(0, currentlines)
1803 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1853 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1804 b1 = randint(0, maxb1)
1854 b1 = randint(0, maxb1)
1805 b2 = randint(b1, b1 + maxhunklines)
1855 b2 = randint(b1, b1 + maxhunklines)
1806 currentlines += (b2 - b1) - (a2 - a1)
1856 currentlines += (b2 - b1) - (a2 - a1)
1807 arglist.append((rev, a1, a2, b1, b2))
1857 arglist.append((rev, a1, a2, b1, b2))
1808
1858
1809 def d():
1859 def d():
1810 ll = linelog.linelog()
1860 ll = linelog.linelog()
1811 for args in arglist:
1861 for args in arglist:
1812 ll.replacelines(*args)
1862 ll.replacelines(*args)
1813
1863
1814 timer, fm = gettimer(ui, opts)
1864 timer, fm = gettimer(ui, opts)
1815 timer(d)
1865 timer(d)
1816 fm.end()
1866 fm.end()
1817
1867
1818
1868
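# perf::linelogedits is norepo and purely synthetic: it replays a fixed
# pseudo-random sequence of replacelines() calls (random.seed(0) keeps runs
# comparable). Illustrative invocation doubling the default edit count:
#
#   $ hg perf::linelogedits -n 20000 --max-hunk-lines 10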
1819 @command(b'perf::revrange|perfrevrange', formatteropts)
1869 @command(b'perf::revrange|perfrevrange', formatteropts)
1820 def perfrevrange(ui, repo, *specs, **opts):
1870 def perfrevrange(ui, repo, *specs, **opts):
1821 opts = _byteskwargs(opts)
1871 opts = _byteskwargs(opts)
1822 timer, fm = gettimer(ui, opts)
1872 timer, fm = gettimer(ui, opts)
1823 revrange = scmutil.revrange
1873 revrange = scmutil.revrange
1824 timer(lambda: len(revrange(repo, specs)))
1874 timer(lambda: len(revrange(repo, specs)))
1825 fm.end()
1875 fm.end()
1826
1876
1827
1877
1828 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
1878 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
1829 def perfnodelookup(ui, repo, rev, **opts):
1879 def perfnodelookup(ui, repo, rev, **opts):
1830 opts = _byteskwargs(opts)
1880 opts = _byteskwargs(opts)
1831 timer, fm = gettimer(ui, opts)
1881 timer, fm = gettimer(ui, opts)
1832 import mercurial.revlog
1882 import mercurial.revlog
1833
1883
1834 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1884 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1835 n = scmutil.revsingle(repo, rev).node()
1885 n = scmutil.revsingle(repo, rev).node()
1836
1886
1837 try:
1887 try:
1838 cl = revlog(getsvfs(repo), radix=b"00changelog")
1888 cl = revlog(getsvfs(repo), radix=b"00changelog")
1839 except TypeError:
1889 except TypeError:
1840 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
1890 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
1841
1891
1842 def d():
1892 def d():
1843 cl.rev(n)
1893 cl.rev(n)
1844 clearcaches(cl)
1894 clearcaches(cl)
1845
1895
1846 timer(d)
1896 timer(d)
1847 fm.end()
1897 fm.end()
1848
1898
1849
1899
1850 @command(
1900 @command(
1851 b'perf::log|perflog',
1901 b'perf::log|perflog',
1852 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1902 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1853 )
1903 )
1854 def perflog(ui, repo, rev=None, **opts):
1904 def perflog(ui, repo, rev=None, **opts):
1855 opts = _byteskwargs(opts)
1905 opts = _byteskwargs(opts)
1856 if rev is None:
1906 if rev is None:
1857 rev = []
1907 rev = []
1858 timer, fm = gettimer(ui, opts)
1908 timer, fm = gettimer(ui, opts)
1859 ui.pushbuffer()
1909 ui.pushbuffer()
1860 timer(
1910 timer(
1861 lambda: commands.log(
1911 lambda: commands.log(
1862 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1912 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1863 )
1913 )
1864 )
1914 )
1865 ui.popbuffer()
1915 ui.popbuffer()
1866 fm.end()
1916 fm.end()
1867
1917
1868
1918
1869 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
1919 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
1870 def perfmoonwalk(ui, repo, **opts):
1920 def perfmoonwalk(ui, repo, **opts):
1871 """benchmark walking the changelog backwards
1921 """benchmark walking the changelog backwards
1872
1922
1873 This also loads the changelog data for each revision in the changelog.
1923 This also loads the changelog data for each revision in the changelog.
1874 """
1924 """
1875 opts = _byteskwargs(opts)
1925 opts = _byteskwargs(opts)
1876 timer, fm = gettimer(ui, opts)
1926 timer, fm = gettimer(ui, opts)
1877
1927
1878 def moonwalk():
1928 def moonwalk():
1879 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1929 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1880 ctx = repo[i]
1930 ctx = repo[i]
1881 ctx.branch() # read changelog data (in addition to the index)
1931 ctx.branch() # read changelog data (in addition to the index)
1882
1932
1883 timer(moonwalk)
1933 timer(moonwalk)
1884 fm.end()
1934 fm.end()
1885
1935
1886
1936
1887 @command(
1937 @command(
1888 b'perf::templating|perftemplating',
1938 b'perf::templating|perftemplating',
1889 [
1939 [
1890 (b'r', b'rev', [], b'revisions to run the template on'),
1940 (b'r', b'rev', [], b'revisions to run the template on'),
1891 ]
1941 ]
1892 + formatteropts,
1942 + formatteropts,
1893 )
1943 )
1894 def perftemplating(ui, repo, testedtemplate=None, **opts):
1944 def perftemplating(ui, repo, testedtemplate=None, **opts):
1895 """test the rendering time of a given template"""
1945 """test the rendering time of a given template"""
1896 if makelogtemplater is None:
1946 if makelogtemplater is None:
1897 raise error.Abort(
1947 raise error.Abort(
1898 b"perftemplating not available with this Mercurial",
1948 b"perftemplating not available with this Mercurial",
1899 hint=b"use 4.3 or later",
1949 hint=b"use 4.3 or later",
1900 )
1950 )
1901
1951
1902 opts = _byteskwargs(opts)
1952 opts = _byteskwargs(opts)
1903
1953
1904 nullui = ui.copy()
1954 nullui = ui.copy()
1905 nullui.fout = open(os.devnull, 'wb')
1955 nullui.fout = open(os.devnull, 'wb')
1906 nullui.disablepager()
1956 nullui.disablepager()
1907 revs = opts.get(b'rev')
1957 revs = opts.get(b'rev')
1908 if not revs:
1958 if not revs:
1909 revs = [b'all()']
1959 revs = [b'all()']
1910 revs = list(scmutil.revrange(repo, revs))
1960 revs = list(scmutil.revrange(repo, revs))
1911
1961
1912 defaulttemplate = (
1962 defaulttemplate = (
1913 b'{date|shortdate} [{rev}:{node|short}]'
1963 b'{date|shortdate} [{rev}:{node|short}]'
1914 b' {author|person}: {desc|firstline}\n'
1964 b' {author|person}: {desc|firstline}\n'
1915 )
1965 )
1916 if testedtemplate is None:
1966 if testedtemplate is None:
1917 testedtemplate = defaulttemplate
1967 testedtemplate = defaulttemplate
1918 displayer = makelogtemplater(nullui, repo, testedtemplate)
1968 displayer = makelogtemplater(nullui, repo, testedtemplate)
1919
1969
1920 def format():
1970 def format():
1921 for r in revs:
1971 for r in revs:
1922 ctx = repo[r]
1972 ctx = repo[r]
1923 displayer.show(ctx)
1973 displayer.show(ctx)
1924 displayer.flush(ctx)
1974 displayer.flush(ctx)
1925
1975
1926 timer, fm = gettimer(ui, opts)
1976 timer, fm = gettimer(ui, opts)
1927 timer(format)
1977 timer(format)
1928 fm.end()
1978 fm.end()
1929
1979
1930
1980
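# Reading the percentile table printed by the helper below: each entry of
# data[key] is a (value, ...) tuple and the list is sorted by value, so with
# 200 collected items, for example, the '90%' row reports
# values[(200 * 90) // 100][0], i.e. the value of the 181st smallest item.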
1931 def _displaystats(ui, opts, entries, data):
1981 def _displaystats(ui, opts, entries, data):
1932 # use a second formatter because the data are quite different, not sure
1982 # use a second formatter because the data are quite different, not sure
1933 # how it flies with the templater.
1983 # how it flies with the templater.
1934 fm = ui.formatter(b'perf-stats', opts)
1984 fm = ui.formatter(b'perf-stats', opts)
1935 for key, title in entries:
1985 for key, title in entries:
1936 values = data[key]
1986 values = data[key]
        nbvalues = len(values)  # percentile indices are computed over the sorted values
1938 values.sort()
1988 values.sort()
1939 stats = {
1989 stats = {
1940 'key': key,
1990 'key': key,
1941 'title': title,
1991 'title': title,
1942 'nbitems': len(values),
1992 'nbitems': len(values),
1943 'min': values[0][0],
1993 'min': values[0][0],
1944 '10%': values[(nbvalues * 10) // 100][0],
1994 '10%': values[(nbvalues * 10) // 100][0],
1945 '25%': values[(nbvalues * 25) // 100][0],
1995 '25%': values[(nbvalues * 25) // 100][0],
1946 '50%': values[(nbvalues * 50) // 100][0],
1996 '50%': values[(nbvalues * 50) // 100][0],
1947 '75%': values[(nbvalues * 75) // 100][0],
1997 '75%': values[(nbvalues * 75) // 100][0],
1948 '80%': values[(nbvalues * 80) // 100][0],
1998 '80%': values[(nbvalues * 80) // 100][0],
1949 '85%': values[(nbvalues * 85) // 100][0],
1999 '85%': values[(nbvalues * 85) // 100][0],
1950 '90%': values[(nbvalues * 90) // 100][0],
2000 '90%': values[(nbvalues * 90) // 100][0],
1951 '95%': values[(nbvalues * 95) // 100][0],
2001 '95%': values[(nbvalues * 95) // 100][0],
1952 '99%': values[(nbvalues * 99) // 100][0],
2002 '99%': values[(nbvalues * 99) // 100][0],
1953 'max': values[-1][0],
2003 'max': values[-1][0],
1954 }
2004 }
1955 fm.startitem()
2005 fm.startitem()
1956 fm.data(**stats)
2006 fm.data(**stats)
1957 # make node pretty for the human output
2007 # make node pretty for the human output
1958 fm.plain('### %s (%d items)\n' % (title, len(values)))
2008 fm.plain('### %s (%d items)\n' % (title, len(values)))
1959 lines = [
2009 lines = [
1960 'min',
2010 'min',
1961 '10%',
2011 '10%',
1962 '25%',
2012 '25%',
1963 '50%',
2013 '50%',
1964 '75%',
2014 '75%',
1965 '80%',
2015 '80%',
1966 '85%',
2016 '85%',
1967 '90%',
2017 '90%',
1968 '95%',
2018 '95%',
1969 '99%',
2019 '99%',
1970 'max',
2020 'max',
1971 ]
2021 ]
1972 for l in lines:
2022 for l in lines:
1973 fm.plain('%s: %s\n' % (l, stats[l]))
2023 fm.plain('%s: %s\n' % (l, stats[l]))
1974 fm.end()
2024 fm.end()
1975
2025
1976
2026
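# Typical use of the helper below: scan merges for expensive copy-tracing
# cases, then feed the reported (base, p1, p2) nodes to perf::mergecopies.
# Illustrative invocation (the revset is an example); --timing and --stats
# enable the extra columns and the percentile summary:
#
#   $ hg perf::helper-mergecopies --revs '-5000:' --timing --stats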
1977 @command(
2027 @command(
1978 b'perf::helper-mergecopies|perfhelper-mergecopies',
2028 b'perf::helper-mergecopies|perfhelper-mergecopies',
1979 formatteropts
2029 formatteropts
1980 + [
2030 + [
1981 (b'r', b'revs', [], b'restrict search to these revisions'),
2031 (b'r', b'revs', [], b'restrict search to these revisions'),
1982 (b'', b'timing', False, b'provides extra data (costly)'),
2032 (b'', b'timing', False, b'provides extra data (costly)'),
1983 (b'', b'stats', False, b'provides statistic about the measured data'),
2033 (b'', b'stats', False, b'provides statistic about the measured data'),
1984 ],
2034 ],
1985 )
2035 )
1986 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2036 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1987 """find statistics about potential parameters for `perfmergecopies`
2037 """find statistics about potential parameters for `perfmergecopies`
1988
2038
    This command finds (base, p1, p2) triplets relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.
1992
2042
1993 If `--timing` is set, rename detection is run and the associated timing
2043 If `--timing` is set, rename detection is run and the associated timing
1994 will be reported. The extra details come at the cost of slower command
2044 will be reported. The extra details come at the cost of slower command
1995 execution.
2045 execution.
1996
2046
1997 Since rename detection is only run once, other factors might easily
2047 Since rename detection is only run once, other factors might easily
1998 affect the precision of the timing. However it should give a good
2048 affect the precision of the timing. However it should give a good
1999 approximation of which revision triplets are very costly.
2049 approximation of which revision triplets are very costly.
2000 """
2050 """
2001 opts = _byteskwargs(opts)
2051 opts = _byteskwargs(opts)
2002 fm = ui.formatter(b'perf', opts)
2052 fm = ui.formatter(b'perf', opts)
2003 dotiming = opts[b'timing']
2053 dotiming = opts[b'timing']
2004 dostats = opts[b'stats']
2054 dostats = opts[b'stats']
2005
2055
2006 output_template = [
2056 output_template = [
2007 ("base", "%(base)12s"),
2057 ("base", "%(base)12s"),
2008 ("p1", "%(p1.node)12s"),
2058 ("p1", "%(p1.node)12s"),
2009 ("p2", "%(p2.node)12s"),
2059 ("p2", "%(p2.node)12s"),
2010 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2060 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2011 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2061 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2012 ("p1.renames", "%(p1.renamedfiles)12d"),
2062 ("p1.renames", "%(p1.renamedfiles)12d"),
2013 ("p1.time", "%(p1.time)12.3f"),
2063 ("p1.time", "%(p1.time)12.3f"),
2014 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2064 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2015 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2065 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2016 ("p2.renames", "%(p2.renamedfiles)12d"),
2066 ("p2.renames", "%(p2.renamedfiles)12d"),
2017 ("p2.time", "%(p2.time)12.3f"),
2067 ("p2.time", "%(p2.time)12.3f"),
2018 ("renames", "%(nbrenamedfiles)12d"),
2068 ("renames", "%(nbrenamedfiles)12d"),
2019 ("total.time", "%(time)12.3f"),
2069 ("total.time", "%(time)12.3f"),
2020 ]
2070 ]
2021 if not dotiming:
2071 if not dotiming:
2022 output_template = [
2072 output_template = [
2023 i
2073 i
2024 for i in output_template
2074 for i in output_template
2025 if not ('time' in i[0] or 'renames' in i[0])
2075 if not ('time' in i[0] or 'renames' in i[0])
2026 ]
2076 ]
2027 header_names = [h for (h, v) in output_template]
2077 header_names = [h for (h, v) in output_template]
2028 output = ' '.join([v for (h, v) in output_template]) + '\n'
2078 output = ' '.join([v for (h, v) in output_template]) + '\n'
2029 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2079 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2030 fm.plain(header % tuple(header_names))
2080 fm.plain(header % tuple(header_names))
2031
2081
2032 if not revs:
2082 if not revs:
2033 revs = ['all()']
2083 revs = ['all()']
2034 revs = scmutil.revrange(repo, revs)
2084 revs = scmutil.revrange(repo, revs)
2035
2085
2036 if dostats:
2086 if dostats:
2037 alldata = {
2087 alldata = {
2038 'nbrevs': [],
2088 'nbrevs': [],
2039 'nbmissingfiles': [],
2089 'nbmissingfiles': [],
2040 }
2090 }
2041 if dotiming:
2091 if dotiming:
2042 alldata['parentnbrenames'] = []
2092 alldata['parentnbrenames'] = []
2043 alldata['totalnbrenames'] = []
2093 alldata['totalnbrenames'] = []
2044 alldata['parenttime'] = []
2094 alldata['parenttime'] = []
2045 alldata['totaltime'] = []
2095 alldata['totaltime'] = []
2046
2096
2047 roi = repo.revs('merge() and %ld', revs)
2097 roi = repo.revs('merge() and %ld', revs)
2048 for r in roi:
2098 for r in roi:
2049 ctx = repo[r]
2099 ctx = repo[r]
2050 p1 = ctx.p1()
2100 p1 = ctx.p1()
2051 p2 = ctx.p2()
2101 p2 = ctx.p2()
2052 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2102 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2053 for b in bases:
2103 for b in bases:
2054 b = repo[b]
2104 b = repo[b]
2055 p1missing = copies._computeforwardmissing(b, p1)
2105 p1missing = copies._computeforwardmissing(b, p1)
2056 p2missing = copies._computeforwardmissing(b, p2)
2106 p2missing = copies._computeforwardmissing(b, p2)
2057 data = {
2107 data = {
2058 b'base': b.hex(),
2108 b'base': b.hex(),
2059 b'p1.node': p1.hex(),
2109 b'p1.node': p1.hex(),
2060 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2110 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2061 b'p1.nbmissingfiles': len(p1missing),
2111 b'p1.nbmissingfiles': len(p1missing),
2062 b'p2.node': p2.hex(),
2112 b'p2.node': p2.hex(),
2063 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2113 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2064 b'p2.nbmissingfiles': len(p2missing),
2114 b'p2.nbmissingfiles': len(p2missing),
2065 }
2115 }
2066 if dostats:
2116 if dostats:
2067 if p1missing:
2117 if p1missing:
2068 alldata['nbrevs'].append(
2118 alldata['nbrevs'].append(
2069 (data['p1.nbrevs'], b.hex(), p1.hex())
2119 (data['p1.nbrevs'], b.hex(), p1.hex())
2070 )
2120 )
2071 alldata['nbmissingfiles'].append(
2121 alldata['nbmissingfiles'].append(
2072 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2122 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2073 )
2123 )
2074 if p2missing:
2124 if p2missing:
2075 alldata['nbrevs'].append(
2125 alldata['nbrevs'].append(
2076 (data['p2.nbrevs'], b.hex(), p2.hex())
2126 (data['p2.nbrevs'], b.hex(), p2.hex())
2077 )
2127 )
2078 alldata['nbmissingfiles'].append(
2128 alldata['nbmissingfiles'].append(
2079 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2129 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2080 )
2130 )
2081 if dotiming:
2131 if dotiming:
2082 begin = util.timer()
2132 begin = util.timer()
2083 mergedata = copies.mergecopies(repo, p1, p2, b)
2133 mergedata = copies.mergecopies(repo, p1, p2, b)
2084 end = util.timer()
2134 end = util.timer()
2085 # not very stable timing since we did only one run
2135 # not very stable timing since we did only one run
2086 data['time'] = end - begin
2136 data['time'] = end - begin
2087 # mergedata contains five dicts: "copy", "movewithdir",
2137 # mergedata contains five dicts: "copy", "movewithdir",
2088 # "diverge", "renamedelete" and "dirmove".
2138 # "diverge", "renamedelete" and "dirmove".
2089 # The first 4 are about renamed files, so let's count them.
2139 # The first 4 are about renamed files, so let's count them.
2090 renames = len(mergedata[0])
2140 renames = len(mergedata[0])
2091 renames += len(mergedata[1])
2141 renames += len(mergedata[1])
2092 renames += len(mergedata[2])
2142 renames += len(mergedata[2])
2093 renames += len(mergedata[3])
2143 renames += len(mergedata[3])
2094 data['nbrenamedfiles'] = renames
2144 data['nbrenamedfiles'] = renames
2095 begin = util.timer()
2145 begin = util.timer()
2096 p1renames = copies.pathcopies(b, p1)
2146 p1renames = copies.pathcopies(b, p1)
2097 end = util.timer()
2147 end = util.timer()
2098 data['p1.time'] = end - begin
2148 data['p1.time'] = end - begin
2099 begin = util.timer()
2149 begin = util.timer()
2100 p2renames = copies.pathcopies(b, p2)
2150 p2renames = copies.pathcopies(b, p2)
2101 end = util.timer()
2151 end = util.timer()
2102 data['p2.time'] = end - begin
2152 data['p2.time'] = end - begin
2103 data['p1.renamedfiles'] = len(p1renames)
2153 data['p1.renamedfiles'] = len(p1renames)
2104 data['p2.renamedfiles'] = len(p2renames)
2154 data['p2.renamedfiles'] = len(p2renames)
2105
2155
2106 if dostats:
2156 if dostats:
2107 if p1missing:
2157 if p1missing:
2108 alldata['parentnbrenames'].append(
2158 alldata['parentnbrenames'].append(
2109 (data['p1.renamedfiles'], b.hex(), p1.hex())
2159 (data['p1.renamedfiles'], b.hex(), p1.hex())
2110 )
2160 )
2111 alldata['parenttime'].append(
2161 alldata['parenttime'].append(
2112 (data['p1.time'], b.hex(), p1.hex())
2162 (data['p1.time'], b.hex(), p1.hex())
2113 )
2163 )
2114 if p2missing:
2164 if p2missing:
2115 alldata['parentnbrenames'].append(
2165 alldata['parentnbrenames'].append(
2116 (data['p2.renamedfiles'], b.hex(), p2.hex())
2166 (data['p2.renamedfiles'], b.hex(), p2.hex())
2117 )
2167 )
2118 alldata['parenttime'].append(
2168 alldata['parenttime'].append(
2119 (data['p2.time'], b.hex(), p2.hex())
2169 (data['p2.time'], b.hex(), p2.hex())
2120 )
2170 )
2121 if p1missing or p2missing:
2171 if p1missing or p2missing:
2122 alldata['totalnbrenames'].append(
2172 alldata['totalnbrenames'].append(
2123 (
2173 (
2124 data['nbrenamedfiles'],
2174 data['nbrenamedfiles'],
2125 b.hex(),
2175 b.hex(),
2126 p1.hex(),
2176 p1.hex(),
2127 p2.hex(),
2177 p2.hex(),
2128 )
2178 )
2129 )
2179 )
2130 alldata['totaltime'].append(
2180 alldata['totaltime'].append(
2131 (data['time'], b.hex(), p1.hex(), p2.hex())
2181 (data['time'], b.hex(), p1.hex(), p2.hex())
2132 )
2182 )
2133 fm.startitem()
2183 fm.startitem()
2134 fm.data(**data)
2184 fm.data(**data)
2135 # make node pretty for the human output
2185 # make node pretty for the human output
2136 out = data.copy()
2186 out = data.copy()
2137 out['base'] = fm.hexfunc(b.node())
2187 out['base'] = fm.hexfunc(b.node())
2138 out['p1.node'] = fm.hexfunc(p1.node())
2188 out['p1.node'] = fm.hexfunc(p1.node())
2139 out['p2.node'] = fm.hexfunc(p2.node())
2189 out['p2.node'] = fm.hexfunc(p2.node())
2140 fm.plain(output % out)
2190 fm.plain(output % out)
2141
2191
2142 fm.end()
2192 fm.end()
2143 if dostats:
2193 if dostats:
2144 # use a second formatter because the data are quite different, not sure
2194 # use a second formatter because the data are quite different, not sure
2145 # how it flies with the templater.
2195 # how it flies with the templater.
2146 entries = [
2196 entries = [
2147 ('nbrevs', 'number of revisions covered'),
2197 ('nbrevs', 'number of revisions covered'),
2148 ('nbmissingfiles', 'number of missing files at head'),
2198 ('nbmissingfiles', 'number of missing files at head'),
2149 ]
2199 ]
2150 if dotiming:
2200 if dotiming:
2151 entries.append(
2201 entries.append(
2152 ('parentnbrenames', 'renames from one parent to base')
2202 ('parentnbrenames', 'renames from one parent to base')
2153 )
2203 )
2154 entries.append(('totalnbrenames', 'total number of renames'))
2204 entries.append(('totalnbrenames', 'total number of renames'))
2155 entries.append(('parenttime', 'time for one parent'))
2205 entries.append(('parenttime', 'time for one parent'))
2156 entries.append(('totaltime', 'time for both parents'))
2206 entries.append(('totaltime', 'time for both parents'))
2157 _displaystats(ui, opts, entries, alldata)
2207 _displaystats(ui, opts, entries, alldata)
2158
2208
2159
2209
2160 @command(
2210 @command(
2161 b'perf::helper-pathcopies|perfhelper-pathcopies',
2211 b'perf::helper-pathcopies|perfhelper-pathcopies',
2162 formatteropts
2212 formatteropts
2163 + [
2213 + [
2164 (b'r', b'revs', [], b'restrict search to these revisions'),
2214 (b'r', b'revs', [], b'restrict search to these revisions'),
2165 (b'', b'timing', False, b'provides extra data (costly)'),
2215 (b'', b'timing', False, b'provides extra data (costly)'),
2166 (b'', b'stats', False, b'provides statistics about the measured data'),
2216 (b'', b'stats', False, b'provides statistics about the measured data'),
2167 ],
2217 ],
2168 )
2218 )
2169 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2219 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2170 """find statistics about potential parameters for the `perftracecopies` command
2220 """find statistics about potential parameters for the `perftracecopies` command
2171
2221
2172 This command finds source-destination pairs relevant for copytracing testing.
2222 This command finds source-destination pairs relevant for copytracing testing.
2173 It reports values for some of the parameters that impact copy tracing time.
2223 It reports values for some of the parameters that impact copy tracing time.
2174
2224
2175 If `--timing` is set, rename detection is run and the associated timing
2225 If `--timing` is set, rename detection is run and the associated timing
2176 will be reported. The extra details come at the cost of a slower command
2226 will be reported. The extra details come at the cost of a slower command
2177 execution.
2227 execution.
2178
2228
2179 Since the rename detection is only run once, other factors might easily
2229 Since the rename detection is only run once, other factors might easily
2180 affect the precision of the timing. However, it should give a good
2230 affect the precision of the timing. However, it should give a good
2181 approximation of which revision pairs are very costly.
2231 approximation of which revision pairs are very costly.
2182 """
2232 """
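# Illustrative invocation (assuming this file is enabled as the "perf" extension;
# the revset and flags below are just an example, not the only useful combination):
#   hg perf::helper-pathcopies -r 'draft()' --stats --timing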
2183 opts = _byteskwargs(opts)
2233 opts = _byteskwargs(opts)
2184 fm = ui.formatter(b'perf', opts)
2234 fm = ui.formatter(b'perf', opts)
2185 dotiming = opts[b'timing']
2235 dotiming = opts[b'timing']
2186 dostats = opts[b'stats']
2236 dostats = opts[b'stats']
2187
2237
2188 if dotiming:
2238 if dotiming:
2189 header = '%12s %12s %12s %12s %12s %12s\n'
2239 header = '%12s %12s %12s %12s %12s %12s\n'
2190 output = (
2240 output = (
2191 "%(source)12s %(destination)12s "
2241 "%(source)12s %(destination)12s "
2192 "%(nbrevs)12d %(nbmissingfiles)12d "
2242 "%(nbrevs)12d %(nbmissingfiles)12d "
2193 "%(nbrenamedfiles)12d %(time)18.5f\n"
2243 "%(nbrenamedfiles)12d %(time)18.5f\n"
2194 )
2244 )
2195 header_names = (
2245 header_names = (
2196 "source",
2246 "source",
2197 "destination",
2247 "destination",
2198 "nb-revs",
2248 "nb-revs",
2199 "nb-files",
2249 "nb-files",
2200 "nb-renames",
2250 "nb-renames",
2201 "time",
2251 "time",
2202 )
2252 )
2203 fm.plain(header % header_names)
2253 fm.plain(header % header_names)
2204 else:
2254 else:
2205 header = '%12s %12s %12s %12s\n'
2255 header = '%12s %12s %12s %12s\n'
2206 output = (
2256 output = (
2207 "%(source)12s %(destination)12s "
2257 "%(source)12s %(destination)12s "
2208 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2258 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2209 )
2259 )
2210 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2260 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2211
2261
2212 if not revs:
2262 if not revs:
2213 revs = ['all()']
2263 revs = ['all()']
2214 revs = scmutil.revrange(repo, revs)
2264 revs = scmutil.revrange(repo, revs)
2215
2265
2216 if dostats:
2266 if dostats:
2217 alldata = {
2267 alldata = {
2218 'nbrevs': [],
2268 'nbrevs': [],
2219 'nbmissingfiles': [],
2269 'nbmissingfiles': [],
2220 }
2270 }
2221 if dotiming:
2271 if dotiming:
2222 alldata['nbrenames'] = []
2272 alldata['nbrenames'] = []
2223 alldata['time'] = []
2273 alldata['time'] = []
2224
2274
2225 roi = repo.revs('merge() and %ld', revs)
2275 roi = repo.revs('merge() and %ld', revs)
2226 for r in roi:
2276 for r in roi:
2227 ctx = repo[r]
2277 ctx = repo[r]
2228 p1 = ctx.p1().rev()
2278 p1 = ctx.p1().rev()
2229 p2 = ctx.p2().rev()
2279 p2 = ctx.p2().rev()
2230 bases = repo.changelog._commonancestorsheads(p1, p2)
2280 bases = repo.changelog._commonancestorsheads(p1, p2)
2231 for p in (p1, p2):
2281 for p in (p1, p2):
2232 for b in bases:
2282 for b in bases:
2233 base = repo[b]
2283 base = repo[b]
2234 parent = repo[p]
2284 parent = repo[p]
2235 missing = copies._computeforwardmissing(base, parent)
2285 missing = copies._computeforwardmissing(base, parent)
2236 if not missing:
2286 if not missing:
2237 continue
2287 continue
2238 data = {
2288 data = {
2239 b'source': base.hex(),
2289 b'source': base.hex(),
2240 b'destination': parent.hex(),
2290 b'destination': parent.hex(),
2241 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2291 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2242 b'nbmissingfiles': len(missing),
2292 b'nbmissingfiles': len(missing),
2243 }
2293 }
2244 if dostats:
2294 if dostats:
2245 alldata['nbrevs'].append(
2295 alldata['nbrevs'].append(
2246 (
2296 (
2247 data['nbrevs'],
2297 data['nbrevs'],
2248 base.hex(),
2298 base.hex(),
2249 parent.hex(),
2299 parent.hex(),
2250 )
2300 )
2251 )
2301 )
2252 alldata['nbmissingfiles'].append(
2302 alldata['nbmissingfiles'].append(
2253 (
2303 (
2254 data['nbmissingfiles'],
2304 data['nbmissingfiles'],
2255 base.hex(),
2305 base.hex(),
2256 parent.hex(),
2306 parent.hex(),
2257 )
2307 )
2258 )
2308 )
2259 if dotiming:
2309 if dotiming:
2260 begin = util.timer()
2310 begin = util.timer()
2261 renames = copies.pathcopies(base, parent)
2311 renames = copies.pathcopies(base, parent)
2262 end = util.timer()
2312 end = util.timer()
2263 # not very stable timing since we did only one run
2313 # not very stable timing since we did only one run
2264 data['time'] = end - begin
2314 data['time'] = end - begin
2265 data['nbrenamedfiles'] = len(renames)
2315 data['nbrenamedfiles'] = len(renames)
2266 if dostats:
2316 if dostats:
2267 alldata['time'].append(
2317 alldata['time'].append(
2268 (
2318 (
2269 data['time'],
2319 data['time'],
2270 base.hex(),
2320 base.hex(),
2271 parent.hex(),
2321 parent.hex(),
2272 )
2322 )
2273 )
2323 )
2274 alldata['nbrenames'].append(
2324 alldata['nbrenames'].append(
2275 (
2325 (
2276 data['nbrenamedfiles'],
2326 data['nbrenamedfiles'],
2277 base.hex(),
2327 base.hex(),
2278 parent.hex(),
2328 parent.hex(),
2279 )
2329 )
2280 )
2330 )
2281 fm.startitem()
2331 fm.startitem()
2282 fm.data(**data)
2332 fm.data(**data)
2283 out = data.copy()
2333 out = data.copy()
2284 out['source'] = fm.hexfunc(base.node())
2334 out['source'] = fm.hexfunc(base.node())
2285 out['destination'] = fm.hexfunc(parent.node())
2335 out['destination'] = fm.hexfunc(parent.node())
2286 fm.plain(output % out)
2336 fm.plain(output % out)
2287
2337
2288 fm.end()
2338 fm.end()
2289 if dostats:
2339 if dostats:
2290 entries = [
2340 entries = [
2291 ('nbrevs', 'number of revisions covered'),
2341 ('nbrevs', 'number of revisions covered'),
2292 ('nbmissingfiles', 'number of missing files at head'),
2342 ('nbmissingfiles', 'number of missing files at head'),
2293 ]
2343 ]
2294 if dotiming:
2344 if dotiming:
2295 entries.append(('nbrenames', 'renamed files'))
2345 entries.append(('nbrenames', 'renamed files'))
2296 entries.append(('time', 'time'))
2346 entries.append(('time', 'time'))
2297 _displaystats(ui, opts, entries, alldata)
2347 _displaystats(ui, opts, entries, alldata)
2298
2348
2299
2349
2300 @command(b'perf::cca|perfcca', formatteropts)
2350 @command(b'perf::cca|perfcca', formatteropts)
2301 def perfcca(ui, repo, **opts):
2351 def perfcca(ui, repo, **opts):
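"""benchmark construction of a case-collision auditor over the dirstate"""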
2302 opts = _byteskwargs(opts)
2352 opts = _byteskwargs(opts)
2303 timer, fm = gettimer(ui, opts)
2353 timer, fm = gettimer(ui, opts)
2304 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2354 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2305 fm.end()
2355 fm.end()
2306
2356
2307
2357
2308 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2358 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2309 def perffncacheload(ui, repo, **opts):
2359 def perffncacheload(ui, repo, **opts):
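"""benchmark loading the fncache file from the store"""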
2310 opts = _byteskwargs(opts)
2360 opts = _byteskwargs(opts)
2311 timer, fm = gettimer(ui, opts)
2361 timer, fm = gettimer(ui, opts)
2312 s = repo.store
2362 s = repo.store
2313
2363
2314 def d():
2364 def d():
2315 s.fncache._load()
2365 s.fncache._load()
2316
2366
2317 timer(d)
2367 timer(d)
2318 fm.end()
2368 fm.end()
2319
2369
2320
2370
2321 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2371 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2322 def perffncachewrite(ui, repo, **opts):
2372 def perffncachewrite(ui, repo, **opts):
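"""benchmark rewriting the fncache file (existing entries, inside a transaction)"""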
2323 opts = _byteskwargs(opts)
2373 opts = _byteskwargs(opts)
2324 timer, fm = gettimer(ui, opts)
2374 timer, fm = gettimer(ui, opts)
2325 s = repo.store
2375 s = repo.store
2326 lock = repo.lock()
2376 lock = repo.lock()
2327 s.fncache._load()
2377 s.fncache._load()
2328 tr = repo.transaction(b'perffncachewrite')
2378 tr = repo.transaction(b'perffncachewrite')
2329 tr.addbackup(b'fncache')
2379 tr.addbackup(b'fncache')
2330
2380
2331 def d():
2381 def d():
2332 s.fncache._dirty = True
2382 s.fncache._dirty = True
2333 s.fncache.write(tr)
2383 s.fncache.write(tr)
2334
2384
2335 timer(d)
2385 timer(d)
2336 tr.close()
2386 tr.close()
2337 lock.release()
2387 lock.release()
2338 fm.end()
2388 fm.end()
2339
2389
2340
2390
2341 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2391 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2342 def perffncacheencode(ui, repo, **opts):
2392 def perffncacheencode(ui, repo, **opts):
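"""benchmark encoding every fncache entry with the store filename encoder"""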
2343 opts = _byteskwargs(opts)
2393 opts = _byteskwargs(opts)
2344 timer, fm = gettimer(ui, opts)
2394 timer, fm = gettimer(ui, opts)
2345 s = repo.store
2395 s = repo.store
2346 s.fncache._load()
2396 s.fncache._load()
2347
2397
2348 def d():
2398 def d():
2349 for p in s.fncache.entries:
2399 for p in s.fncache.entries:
2350 s.encode(p)
2400 s.encode(p)
2351
2401
2352 timer(d)
2402 timer(d)
2353 fm.end()
2403 fm.end()
2354
2404
2355
2405
2356 def _bdiffworker(q, blocks, xdiff, ready, done):
2406 def _bdiffworker(q, blocks, xdiff, ready, done):
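# worker thread: diff text pairs pulled from the queue until a None sentinel,
# then park on the `ready` condition so the same thread can serve the next run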
2357 while not done.is_set():
2407 while not done.is_set():
2358 pair = q.get()
2408 pair = q.get()
2359 while pair is not None:
2409 while pair is not None:
2360 if xdiff:
2410 if xdiff:
2361 mdiff.bdiff.xdiffblocks(*pair)
2411 mdiff.bdiff.xdiffblocks(*pair)
2362 elif blocks:
2412 elif blocks:
2363 mdiff.bdiff.blocks(*pair)
2413 mdiff.bdiff.blocks(*pair)
2364 else:
2414 else:
2365 mdiff.textdiff(*pair)
2415 mdiff.textdiff(*pair)
2366 q.task_done()
2416 q.task_done()
2367 pair = q.get()
2417 pair = q.get()
2368 q.task_done() # for the None one
2418 q.task_done() # for the None one
2369 with ready:
2419 with ready:
2370 ready.wait()
2420 ready.wait()
2371
2421
2372
2422
2373 def _manifestrevision(repo, mnode):
2423 def _manifestrevision(repo, mnode):
2374 ml = repo.manifestlog
2424 ml = repo.manifestlog
2375
2425
2376 if util.safehasattr(ml, b'getstorage'):
2426 if util.safehasattr(ml, b'getstorage'):
2377 store = ml.getstorage(b'')
2427 store = ml.getstorage(b'')
2378 else:
2428 else:
2379 store = ml._revlog
2429 store = ml._revlog
2380
2430
2381 return store.revision(mnode)
2431 return store.revision(mnode)
2382
2432
2383
2433
2384 @command(
2434 @command(
2385 b'perf::bdiff|perfbdiff',
2435 b'perf::bdiff|perfbdiff',
2386 revlogopts
2436 revlogopts
2387 + formatteropts
2437 + formatteropts
2388 + [
2438 + [
2389 (
2439 (
2390 b'',
2440 b'',
2391 b'count',
2441 b'count',
2392 1,
2442 1,
2393 b'number of revisions to test (when using --startrev)',
2443 b'number of revisions to test (when using --startrev)',
2394 ),
2444 ),
2395 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2445 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2396 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
2446 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
2397 (b'', b'blocks', False, b'test computing diffs into blocks'),
2447 (b'', b'blocks', False, b'test computing diffs into blocks'),
2398 (b'', b'xdiff', False, b'use xdiff algorithm'),
2448 (b'', b'xdiff', False, b'use xdiff algorithm'),
2399 ],
2449 ],
2400 b'-c|-m|FILE REV',
2450 b'-c|-m|FILE REV',
2401 )
2451 )
2402 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2452 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2403 """benchmark a bdiff between revisions
2453 """benchmark a bdiff between revisions
2404
2454
2405 By default, benchmark a bdiff between the requested revision and its delta parent.
2455 By default, benchmark a bdiff between the requested revision and its delta parent.
2406
2456
2407 With ``--count``, benchmark bdiffs between delta parents and self for N
2457 With ``--count``, benchmark bdiffs between delta parents and self for N
2408 revisions starting at the specified revision.
2458 revisions starting at the specified revision.
2409
2459
2410 With ``--alldata``, assume the requested revision is a changeset and
2460 With ``--alldata``, assume the requested revision is a changeset and
2411 measure bdiffs for all changes related to that changeset (manifest
2461 measure bdiffs for all changes related to that changeset (manifest
2412 and filelogs).
2462 and filelogs).
2413 """
2463 """
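# Illustrative invocations (assuming the extension is enabled as "perf"; the
# revision numbers are placeholders):
#   hg perf::bdiff -c 1000                  # changelog rev 1000 vs its delta parent
#   hg perf::bdiff -m 1000 --count 50       # 50 manifest revisions starting at 1000
#   hg perf::bdiff --alldata 1000 --blocks  # block diffs for everything changeset 1000 touches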
2414 opts = _byteskwargs(opts)
2464 opts = _byteskwargs(opts)
2415
2465
2416 if opts[b'xdiff'] and not opts[b'blocks']:
2466 if opts[b'xdiff'] and not opts[b'blocks']:
2417 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2467 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2418
2468
2419 if opts[b'alldata']:
2469 if opts[b'alldata']:
2420 opts[b'changelog'] = True
2470 opts[b'changelog'] = True
2421
2471
2422 if opts.get(b'changelog') or opts.get(b'manifest'):
2472 if opts.get(b'changelog') or opts.get(b'manifest'):
2423 file_, rev = None, file_
2473 file_, rev = None, file_
2424 elif rev is None:
2474 elif rev is None:
2425 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2475 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2426
2476
2427 blocks = opts[b'blocks']
2477 blocks = opts[b'blocks']
2428 xdiff = opts[b'xdiff']
2478 xdiff = opts[b'xdiff']
2429 textpairs = []
2479 textpairs = []
2430
2480
2431 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2481 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2432
2482
2433 startrev = r.rev(r.lookup(rev))
2483 startrev = r.rev(r.lookup(rev))
2434 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2484 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2435 if opts[b'alldata']:
2485 if opts[b'alldata']:
2436 # Load revisions associated with changeset.
2486 # Load revisions associated with changeset.
2437 ctx = repo[rev]
2487 ctx = repo[rev]
2438 mtext = _manifestrevision(repo, ctx.manifestnode())
2488 mtext = _manifestrevision(repo, ctx.manifestnode())
2439 for pctx in ctx.parents():
2489 for pctx in ctx.parents():
2440 pman = _manifestrevision(repo, pctx.manifestnode())
2490 pman = _manifestrevision(repo, pctx.manifestnode())
2441 textpairs.append((pman, mtext))
2491 textpairs.append((pman, mtext))
2442
2492
2443 # Load filelog revisions by iterating manifest delta.
2493 # Load filelog revisions by iterating manifest delta.
2444 man = ctx.manifest()
2494 man = ctx.manifest()
2445 pman = ctx.p1().manifest()
2495 pman = ctx.p1().manifest()
2446 for filename, change in pman.diff(man).items():
2496 for filename, change in pman.diff(man).items():
2447 fctx = repo.file(filename)
2497 fctx = repo.file(filename)
2448 f1 = fctx.revision(change[0][0] or -1)
2498 f1 = fctx.revision(change[0][0] or -1)
2449 f2 = fctx.revision(change[1][0] or -1)
2499 f2 = fctx.revision(change[1][0] or -1)
2450 textpairs.append((f1, f2))
2500 textpairs.append((f1, f2))
2451 else:
2501 else:
2452 dp = r.deltaparent(rev)
2502 dp = r.deltaparent(rev)
2453 textpairs.append((r.revision(dp), r.revision(rev)))
2503 textpairs.append((r.revision(dp), r.revision(rev)))
2454
2504
2455 withthreads = threads > 0
2505 withthreads = threads > 0
2456 if not withthreads:
2506 if not withthreads:
2457
2507
2458 def d():
2508 def d():
2459 for pair in textpairs:
2509 for pair in textpairs:
2460 if xdiff:
2510 if xdiff:
2461 mdiff.bdiff.xdiffblocks(*pair)
2511 mdiff.bdiff.xdiffblocks(*pair)
2462 elif blocks:
2512 elif blocks:
2463 mdiff.bdiff.blocks(*pair)
2513 mdiff.bdiff.blocks(*pair)
2464 else:
2514 else:
2465 mdiff.textdiff(*pair)
2515 mdiff.textdiff(*pair)
2466
2516
2467 else:
2517 else:
2468 q = queue()
2518 q = queue()
2469 for i in _xrange(threads):
2519 for i in _xrange(threads):
2470 q.put(None)
2520 q.put(None)
2471 ready = threading.Condition()
2521 ready = threading.Condition()
2472 done = threading.Event()
2522 done = threading.Event()
2473 for i in _xrange(threads):
2523 for i in _xrange(threads):
2474 threading.Thread(
2524 threading.Thread(
2475 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2525 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2476 ).start()
2526 ).start()
2477 q.join()
2527 q.join()
2478
2528
2479 def d():
2529 def d():
2480 for pair in textpairs:
2530 for pair in textpairs:
2481 q.put(pair)
2531 q.put(pair)
2482 for i in _xrange(threads):
2532 for i in _xrange(threads):
2483 q.put(None)
2533 q.put(None)
2484 with ready:
2534 with ready:
2485 ready.notify_all()
2535 ready.notify_all()
2486 q.join()
2536 q.join()
2487
2537
2488 timer, fm = gettimer(ui, opts)
2538 timer, fm = gettimer(ui, opts)
2489 timer(d)
2539 timer(d)
2490 fm.end()
2540 fm.end()
2491
2541
2492 if withthreads:
2542 if withthreads:
2493 done.set()
2543 done.set()
2494 for i in _xrange(threads):
2544 for i in _xrange(threads):
2495 q.put(None)
2545 q.put(None)
2496 with ready:
2546 with ready:
2497 ready.notify_all()
2547 ready.notify_all()
2498
2548
2499
2549
2500 @command(
2550 @command(
2501 b'perf::unidiff|perfunidiff',
2551 b'perf::unidiff|perfunidiff',
2502 revlogopts
2552 revlogopts
2503 + formatteropts
2553 + formatteropts
2504 + [
2554 + [
2505 (
2555 (
2506 b'',
2556 b'',
2507 b'count',
2557 b'count',
2508 1,
2558 1,
2509 b'number of revisions to test (when using --startrev)',
2559 b'number of revisions to test (when using --startrev)',
2510 ),
2560 ),
2511 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2561 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2512 ],
2562 ],
2513 b'-c|-m|FILE REV',
2563 b'-c|-m|FILE REV',
2514 )
2564 )
2515 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2565 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2516 """benchmark a unified diff between revisions
2566 """benchmark a unified diff between revisions
2517
2567
2518 This doesn't include any copy tracing - it's just a unified diff
2568 This doesn't include any copy tracing - it's just a unified diff
2519 of the texts.
2569 of the texts.
2520
2570
2521 By default, benchmark a diff between the requested revision and its delta parent.
2571 By default, benchmark a diff between the requested revision and its delta parent.
2522
2572
2523 With ``--count``, benchmark diffs between delta parents and self for N
2573 With ``--count``, benchmark diffs between delta parents and self for N
2524 revisions starting at the specified revision.
2574 revisions starting at the specified revision.
2525
2575
2526 With ``--alldata``, assume the requested revision is a changeset and
2576 With ``--alldata``, assume the requested revision is a changeset and
2527 measure diffs for all changes related to that changeset (manifest
2577 measure diffs for all changes related to that changeset (manifest
2528 and filelogs).
2578 and filelogs).
2529 """
2579 """
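# Illustrative invocation (revision number is a placeholder):
#   hg perf::unidiff -c 1000 --count 10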
2530 opts = _byteskwargs(opts)
2580 opts = _byteskwargs(opts)
2531 if opts[b'alldata']:
2581 if opts[b'alldata']:
2532 opts[b'changelog'] = True
2582 opts[b'changelog'] = True
2533
2583
2534 if opts.get(b'changelog') or opts.get(b'manifest'):
2584 if opts.get(b'changelog') or opts.get(b'manifest'):
2535 file_, rev = None, file_
2585 file_, rev = None, file_
2536 elif rev is None:
2586 elif rev is None:
2537 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2587 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2538
2588
2539 textpairs = []
2589 textpairs = []
2540
2590
2541 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2591 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2542
2592
2543 startrev = r.rev(r.lookup(rev))
2593 startrev = r.rev(r.lookup(rev))
2544 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2594 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2545 if opts[b'alldata']:
2595 if opts[b'alldata']:
2546 # Load revisions associated with changeset.
2596 # Load revisions associated with changeset.
2547 ctx = repo[rev]
2597 ctx = repo[rev]
2548 mtext = _manifestrevision(repo, ctx.manifestnode())
2598 mtext = _manifestrevision(repo, ctx.manifestnode())
2549 for pctx in ctx.parents():
2599 for pctx in ctx.parents():
2550 pman = _manifestrevision(repo, pctx.manifestnode())
2600 pman = _manifestrevision(repo, pctx.manifestnode())
2551 textpairs.append((pman, mtext))
2601 textpairs.append((pman, mtext))
2552
2602
2553 # Load filelog revisions by iterating manifest delta.
2603 # Load filelog revisions by iterating manifest delta.
2554 man = ctx.manifest()
2604 man = ctx.manifest()
2555 pman = ctx.p1().manifest()
2605 pman = ctx.p1().manifest()
2556 for filename, change in pman.diff(man).items():
2606 for filename, change in pman.diff(man).items():
2557 fctx = repo.file(filename)
2607 fctx = repo.file(filename)
2558 f1 = fctx.revision(change[0][0] or -1)
2608 f1 = fctx.revision(change[0][0] or -1)
2559 f2 = fctx.revision(change[1][0] or -1)
2609 f2 = fctx.revision(change[1][0] or -1)
2560 textpairs.append((f1, f2))
2610 textpairs.append((f1, f2))
2561 else:
2611 else:
2562 dp = r.deltaparent(rev)
2612 dp = r.deltaparent(rev)
2563 textpairs.append((r.revision(dp), r.revision(rev)))
2613 textpairs.append((r.revision(dp), r.revision(rev)))
2564
2614
2565 def d():
2615 def d():
2566 for left, right in textpairs:
2616 for left, right in textpairs:
2567 # The date strings don't matter, so we pass empty strings.
2617 # The date strings don't matter, so we pass empty strings.
2568 headerlines, hunks = mdiff.unidiff(
2618 headerlines, hunks = mdiff.unidiff(
2569 left, b'', right, b'', b'left', b'right', binary=False
2619 left, b'', right, b'', b'left', b'right', binary=False
2570 )
2620 )
2571 # consume iterators in roughly the way patch.py does
2621 # consume iterators in roughly the way patch.py does
2572 b'\n'.join(headerlines)
2622 b'\n'.join(headerlines)
2573 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2623 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2574
2624
2575 timer, fm = gettimer(ui, opts)
2625 timer, fm = gettimer(ui, opts)
2576 timer(d)
2626 timer(d)
2577 fm.end()
2627 fm.end()
2578
2628
2579
2629
2580 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2630 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2581 def perfdiffwd(ui, repo, **opts):
2631 def perfdiffwd(ui, repo, **opts):
2582 """Profile diff of working directory changes"""
2632 """Profile diff of working directory changes"""
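# times `hg diff` on the working directory once per whitespace setting:
# none, -w, -b, -B and -wB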
2583 opts = _byteskwargs(opts)
2633 opts = _byteskwargs(opts)
2584 timer, fm = gettimer(ui, opts)
2634 timer, fm = gettimer(ui, opts)
2585 options = {
2635 options = {
2586 'w': 'ignore_all_space',
2636 'w': 'ignore_all_space',
2587 'b': 'ignore_space_change',
2637 'b': 'ignore_space_change',
2588 'B': 'ignore_blank_lines',
2638 'B': 'ignore_blank_lines',
2589 }
2639 }
2590
2640
2591 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2641 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2592 opts = {options[c]: b'1' for c in diffopt}
2642 opts = {options[c]: b'1' for c in diffopt}
2593
2643
2594 def d():
2644 def d():
2595 ui.pushbuffer()
2645 ui.pushbuffer()
2596 commands.diff(ui, repo, **opts)
2646 commands.diff(ui, repo, **opts)
2597 ui.popbuffer()
2647 ui.popbuffer()
2598
2648
2599 diffopt = diffopt.encode('ascii')
2649 diffopt = diffopt.encode('ascii')
2600 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2650 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2601 timer(d, title=title)
2651 timer(d, title=title)
2602 fm.end()
2652 fm.end()
2603
2653
2604
2654
2605 @command(
2655 @command(
2606 b'perf::revlogindex|perfrevlogindex',
2656 b'perf::revlogindex|perfrevlogindex',
2607 revlogopts + formatteropts,
2657 revlogopts + formatteropts,
2608 b'-c|-m|FILE',
2658 b'-c|-m|FILE',
2609 )
2659 )
2610 def perfrevlogindex(ui, repo, file_=None, **opts):
2660 def perfrevlogindex(ui, repo, file_=None, **opts):
2611 """Benchmark operations against a revlog index.
2661 """Benchmark operations against a revlog index.
2612
2662
2613 This tests constructing a revlog instance, reading index data,
2663 This tests constructing a revlog instance, reading index data,
2614 parsing index data, and performing various operations related to
2664 parsing index data, and performing various operations related to
2615 index data.
2665 index data.
2616 """
2666 """
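# Illustrative invocation: hg perf::revlogindex -c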
2617
2667
2618 opts = _byteskwargs(opts)
2668 opts = _byteskwargs(opts)
2619
2669
2620 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2670 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2621
2671
2622 opener = getattr(rl, 'opener') # trick linter
2672 opener = getattr(rl, 'opener') # trick linter
2623 # compat with hg <= 5.8
2673 # compat with hg <= 5.8
2624 radix = getattr(rl, 'radix', None)
2674 radix = getattr(rl, 'radix', None)
2625 indexfile = getattr(rl, '_indexfile', None)
2675 indexfile = getattr(rl, '_indexfile', None)
2626 if indexfile is None:
2676 if indexfile is None:
2627 # compatibility with <= hg-5.8
2677 # compatibility with <= hg-5.8
2628 indexfile = getattr(rl, 'indexfile')
2678 indexfile = getattr(rl, 'indexfile')
2629 data = opener.read(indexfile)
2679 data = opener.read(indexfile)
2630
2680
2631 header = struct.unpack(b'>I', data[0:4])[0]
2681 header = struct.unpack(b'>I', data[0:4])[0]
2632 version = header & 0xFFFF
2682 version = header & 0xFFFF
2633 if version == 1:
2683 if version == 1:
2634 inline = header & (1 << 16)
2684 inline = header & (1 << 16)
2635 else:
2685 else:
2636 raise error.Abort(b'unsupported revlog version: %d' % version)
2686 raise error.Abort(b'unsupported revlog version: %d' % version)
2637
2687
2638 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2688 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2639 if parse_index_v1 is None:
2689 if parse_index_v1 is None:
2640 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2690 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2641
2691
2642 rllen = len(rl)
2692 rllen = len(rl)
2643
2693
2644 node0 = rl.node(0)
2694 node0 = rl.node(0)
2645 node25 = rl.node(rllen // 4)
2695 node25 = rl.node(rllen // 4)
2646 node50 = rl.node(rllen // 2)
2696 node50 = rl.node(rllen // 2)
2647 node75 = rl.node(rllen // 4 * 3)
2697 node75 = rl.node(rllen // 4 * 3)
2648 node100 = rl.node(rllen - 1)
2698 node100 = rl.node(rllen - 1)
2649
2699
2650 allrevs = range(rllen)
2700 allrevs = range(rllen)
2651 allrevsrev = list(reversed(allrevs))
2701 allrevsrev = list(reversed(allrevs))
2652 allnodes = [rl.node(rev) for rev in range(rllen)]
2702 allnodes = [rl.node(rev) for rev in range(rllen)]
2653 allnodesrev = list(reversed(allnodes))
2703 allnodesrev = list(reversed(allnodes))
2654
2704
2655 def constructor():
2705 def constructor():
2656 if radix is not None:
2706 if radix is not None:
2657 revlog(opener, radix=radix)
2707 revlog(opener, radix=radix)
2658 else:
2708 else:
2659 # hg <= 5.8
2709 # hg <= 5.8
2660 revlog(opener, indexfile=indexfile)
2710 revlog(opener, indexfile=indexfile)
2661
2711
2662 def read():
2712 def read():
2663 with opener(indexfile) as fh:
2713 with opener(indexfile) as fh:
2664 fh.read()
2714 fh.read()
2665
2715
2666 def parseindex():
2716 def parseindex():
2667 parse_index_v1(data, inline)
2717 parse_index_v1(data, inline)
2668
2718
2669 def getentry(revornode):
2719 def getentry(revornode):
2670 index = parse_index_v1(data, inline)[0]
2720 index = parse_index_v1(data, inline)[0]
2671 index[revornode]
2721 index[revornode]
2672
2722
2673 def getentries(revs, count=1):
2723 def getentries(revs, count=1):
2674 index = parse_index_v1(data, inline)[0]
2724 index = parse_index_v1(data, inline)[0]
2675
2725
2676 for i in range(count):
2726 for i in range(count):
2677 for rev in revs:
2727 for rev in revs:
2678 index[rev]
2728 index[rev]
2679
2729
2680 def resolvenode(node):
2730 def resolvenode(node):
2681 index = parse_index_v1(data, inline)[0]
2731 index = parse_index_v1(data, inline)[0]
2682 rev = getattr(index, 'rev', None)
2732 rev = getattr(index, 'rev', None)
2683 if rev is None:
2733 if rev is None:
2684 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2734 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2685 # This only works for the C code.
2735 # This only works for the C code.
2686 if nodemap is None:
2736 if nodemap is None:
2687 return
2737 return
2688 rev = nodemap.__getitem__
2738 rev = nodemap.__getitem__
2689
2739
2690 try:
2740 try:
2691 rev(node)
2741 rev(node)
2692 except error.RevlogError:
2742 except error.RevlogError:
2693 pass
2743 pass
2694
2744
2695 def resolvenodes(nodes, count=1):
2745 def resolvenodes(nodes, count=1):
2696 index = parse_index_v1(data, inline)[0]
2746 index = parse_index_v1(data, inline)[0]
2697 rev = getattr(index, 'rev', None)
2747 rev = getattr(index, 'rev', None)
2698 if rev is None:
2748 if rev is None:
2699 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2749 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2700 # This only works for the C code.
2750 # This only works for the C code.
2701 if nodemap is None:
2751 if nodemap is None:
2702 return
2752 return
2703 rev = nodemap.__getitem__
2753 rev = nodemap.__getitem__
2704
2754
2705 for i in range(count):
2755 for i in range(count):
2706 for node in nodes:
2756 for node in nodes:
2707 try:
2757 try:
2708 rev(node)
2758 rev(node)
2709 except error.RevlogError:
2759 except error.RevlogError:
2710 pass
2760 pass
2711
2761
2712 benches = [
2762 benches = [
2713 (constructor, b'revlog constructor'),
2763 (constructor, b'revlog constructor'),
2714 (read, b'read'),
2764 (read, b'read'),
2715 (parseindex, b'create index object'),
2765 (parseindex, b'create index object'),
2716 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2766 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2717 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2767 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2718 (lambda: resolvenode(node0), b'look up node at rev 0'),
2768 (lambda: resolvenode(node0), b'look up node at rev 0'),
2719 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2769 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2720 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2770 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2721 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2771 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2722 (lambda: resolvenode(node100), b'look up node at tip'),
2772 (lambda: resolvenode(node100), b'look up node at tip'),
2723 # 2x variation is to measure caching impact.
2773 # 2x variation is to measure caching impact.
2724 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2774 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2725 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2775 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2726 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2776 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2727 (
2777 (
2728 lambda: resolvenodes(allnodesrev, 2),
2778 lambda: resolvenodes(allnodesrev, 2),
2729 b'look up all nodes 2x (reverse)',
2779 b'look up all nodes 2x (reverse)',
2730 ),
2780 ),
2731 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2781 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2732 (
2782 (
2733 lambda: getentries(allrevs, 2),
2783 lambda: getentries(allrevs, 2),
2734 b'retrieve all index entries 2x (forward)',
2784 b'retrieve all index entries 2x (forward)',
2735 ),
2785 ),
2736 (
2786 (
2737 lambda: getentries(allrevsrev),
2787 lambda: getentries(allrevsrev),
2738 b'retrieve all index entries (reverse)',
2788 b'retrieve all index entries (reverse)',
2739 ),
2789 ),
2740 (
2790 (
2741 lambda: getentries(allrevsrev, 2),
2791 lambda: getentries(allrevsrev, 2),
2742 b'retrieve all index entries 2x (reverse)',
2792 b'retrieve all index entries 2x (reverse)',
2743 ),
2793 ),
2744 ]
2794 ]
2745
2795
2746 for fn, title in benches:
2796 for fn, title in benches:
2747 timer, fm = gettimer(ui, opts)
2797 timer, fm = gettimer(ui, opts)
2748 timer(fn, title=title)
2798 timer(fn, title=title)
2749 fm.end()
2799 fm.end()
2750
2800
2751
2801
2752 @command(
2802 @command(
2753 b'perf::revlogrevisions|perfrevlogrevisions',
2803 b'perf::revlogrevisions|perfrevlogrevisions',
2754 revlogopts
2804 revlogopts
2755 + formatteropts
2805 + formatteropts
2756 + [
2806 + [
2757 (b'd', b'dist', 100, b'distance between the revisions'),
2807 (b'd', b'dist', 100, b'distance between the revisions'),
2758 (b's', b'startrev', 0, b'revision to start reading at'),
2808 (b's', b'startrev', 0, b'revision to start reading at'),
2759 (b'', b'reverse', False, b'read in reverse'),
2809 (b'', b'reverse', False, b'read in reverse'),
2760 ],
2810 ],
2761 b'-c|-m|FILE',
2811 b'-c|-m|FILE',
2762 )
2812 )
2763 def perfrevlogrevisions(
2813 def perfrevlogrevisions(
2764 ui, repo, file_=None, startrev=0, reverse=False, **opts
2814 ui, repo, file_=None, startrev=0, reverse=False, **opts
2765 ):
2815 ):
2766 """Benchmark reading a series of revisions from a revlog.
2816 """Benchmark reading a series of revisions from a revlog.
2767
2817
2768 By default, we read every ``-d/--dist`` revision from 0 to tip of
2818 By default, we read every ``-d/--dist`` revision from 0 to tip of
2769 the specified revlog.
2819 the specified revlog.
2770
2820
2771 The start revision can be defined via ``-s/--startrev``.
2821 The start revision can be defined via ``-s/--startrev``.
2772 """
2822 """
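# Illustrative invocation (values are placeholders):
#   hg perf::revlogrevisions -c --dist 10 --startrev 1000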
2773 opts = _byteskwargs(opts)
2823 opts = _byteskwargs(opts)
2774
2824
2775 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2825 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2776 rllen = getlen(ui)(rl)
2826 rllen = getlen(ui)(rl)
2777
2827
2778 if startrev < 0:
2828 if startrev < 0:
2779 startrev = rllen + startrev
2829 startrev = rllen + startrev
2780
2830
2781 def d():
2831 def d():
2782 rl.clearcaches()
2832 rl.clearcaches()
2783
2833
2784 beginrev = startrev
2834 beginrev = startrev
2785 endrev = rllen
2835 endrev = rllen
2786 dist = opts[b'dist']
2836 dist = opts[b'dist']
2787
2837
2788 if reverse:
2838 if reverse:
2789 beginrev, endrev = endrev - 1, beginrev - 1
2839 beginrev, endrev = endrev - 1, beginrev - 1
2790 dist = -1 * dist
2840 dist = -1 * dist
2791
2841
2792 for x in _xrange(beginrev, endrev, dist):
2842 for x in _xrange(beginrev, endrev, dist):
2793 # Old revisions don't support passing int.
2843 # Old revisions don't support passing int.
2794 n = rl.node(x)
2844 n = rl.node(x)
2795 rl.revision(n)
2845 rl.revision(n)
2796
2846
2797 timer, fm = gettimer(ui, opts)
2847 timer, fm = gettimer(ui, opts)
2798 timer(d)
2848 timer(d)
2799 fm.end()
2849 fm.end()
2800
2850
2801
2851
2802 @command(
2852 @command(
2803 b'perf::revlogwrite|perfrevlogwrite',
2853 b'perf::revlogwrite|perfrevlogwrite',
2804 revlogopts
2854 revlogopts
2805 + formatteropts
2855 + formatteropts
2806 + [
2856 + [
2807 (b's', b'startrev', 1000, b'revision to start writing at'),
2857 (b's', b'startrev', 1000, b'revision to start writing at'),
2808 (b'', b'stoprev', -1, b'last revision to write'),
2858 (b'', b'stoprev', -1, b'last revision to write'),
2809 (b'', b'count', 3, b'number of passes to perform'),
2859 (b'', b'count', 3, b'number of passes to perform'),
2810 (b'', b'details', False, b'print timing for every revision tested'),
2860 (b'', b'details', False, b'print timing for every revision tested'),
2811 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
2861 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
2812 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2862 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2813 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2863 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2814 ],
2864 ],
2815 b'-c|-m|FILE',
2865 b'-c|-m|FILE',
2816 )
2866 )
2817 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2867 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2818 """Benchmark writing a series of revisions to a revlog.
2868 """Benchmark writing a series of revisions to a revlog.
2819
2869
2820 Possible source values are:
2870 Possible source values are:
2821 * `full`: add from a full text (default).
2871 * `full`: add from a full text (default).
2822 * `parent-1`: add from a delta to the first parent
2872 * `parent-1`: add from a delta to the first parent
2823 * `parent-2`: add from a delta to the second parent if it exists
2873 * `parent-2`: add from a delta to the second parent if it exists
2824 (use a delta from the first parent otherwise)
2874 (use a delta from the first parent otherwise)
2825 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2875 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2826 * `storage`: add from the existing precomputed deltas
2876 * `storage`: add from the existing precomputed deltas
2827
2877
2828 Note: This performance command measures performance in a custom way. As a
2878 Note: This performance command measures performance in a custom way. As a
2829 result some of the global configuration of the 'perf' command does not
2879 result some of the global configuration of the 'perf' command does not
2830 apply to it:
2880 apply to it:
2831
2881
2832 * ``pre-run``: disabled
2882 * ``pre-run``: disabled
2833
2883
2834 * ``profile-benchmark``: disabled
2884 * ``profile-benchmark``: disabled
2835
2885
2836 * ``run-limits``: disabled, use --count instead
2886 * ``run-limits``: disabled, use --count instead
2837 """
2887 """
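# Illustrative invocation (values are placeholders):
#   hg perf::revlogwrite -m --source parent-smallest --count 3 --startrev 10000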
2838 opts = _byteskwargs(opts)
2888 opts = _byteskwargs(opts)
2839
2889
2840 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2890 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2841 rllen = getlen(ui)(rl)
2891 rllen = getlen(ui)(rl)
2842 if startrev < 0:
2892 if startrev < 0:
2843 startrev = rllen + startrev
2893 startrev = rllen + startrev
2844 if stoprev < 0:
2894 if stoprev < 0:
2845 stoprev = rllen + stoprev
2895 stoprev = rllen + stoprev
2846
2896
2847 lazydeltabase = opts['lazydeltabase']
2897 lazydeltabase = opts['lazydeltabase']
2848 source = opts['source']
2898 source = opts['source']
2849 clearcaches = opts['clear_caches']
2899 clearcaches = opts['clear_caches']
2850 validsource = (
2900 validsource = (
2851 b'full',
2901 b'full',
2852 b'parent-1',
2902 b'parent-1',
2853 b'parent-2',
2903 b'parent-2',
2854 b'parent-smallest',
2904 b'parent-smallest',
2855 b'storage',
2905 b'storage',
2856 )
2906 )
2857 if source not in validsource:
2907 if source not in validsource:
2858 raise error.Abort('invalid source type: %s' % source)
2908 raise error.Abort('invalid source type: %s' % source)
2859
2909
2860 ### actually gather results
2910 ### actually gather results
2861 count = opts['count']
2911 count = opts['count']
2862 if count <= 0:
2912 if count <= 0:
2863 raise error.Abort('invalid run count: %d' % count)
2913 raise error.Abort('invalid run count: %d' % count)
2864 allresults = []
2914 allresults = []
2865 for c in range(count):
2915 for c in range(count):
2866 timing = _timeonewrite(
2916 timing = _timeonewrite(
2867 ui,
2917 ui,
2868 rl,
2918 rl,
2869 source,
2919 source,
2870 startrev,
2920 startrev,
2871 stoprev,
2921 stoprev,
2872 c + 1,
2922 c + 1,
2873 lazydeltabase=lazydeltabase,
2923 lazydeltabase=lazydeltabase,
2874 clearcaches=clearcaches,
2924 clearcaches=clearcaches,
2875 )
2925 )
2876 allresults.append(timing)
2926 allresults.append(timing)
2877
2927
2878 ### consolidate the results in a single list
2928 ### consolidate the results in a single list
2879 results = []
2929 results = []
2880 for idx, (rev, t) in enumerate(allresults[0]):
2930 for idx, (rev, t) in enumerate(allresults[0]):
2881 ts = [t]
2931 ts = [t]
2882 for other in allresults[1:]:
2932 for other in allresults[1:]:
2883 orev, ot = other[idx]
2933 orev, ot = other[idx]
2884 assert orev == rev
2934 assert orev == rev
2885 ts.append(ot)
2935 ts.append(ot)
2886 results.append((rev, ts))
2936 results.append((rev, ts))
2887 resultcount = len(results)
2937 resultcount = len(results)
2888
2938
2889 ### Compute and display relevant statistics
2939 ### Compute and display relevant statistics
2890
2940
2891 # get a formatter
2941 # get a formatter
2892 fm = ui.formatter(b'perf', opts)
2942 fm = ui.formatter(b'perf', opts)
2893 displayall = ui.configbool(b"perf", b"all-timing", False)
2943 displayall = ui.configbool(b"perf", b"all-timing", False)
2894
2944
2895 # print individual details if requested
2945 # print individual details if requested
2896 if opts['details']:
2946 if opts['details']:
2897 for idx, item in enumerate(results, 1):
2947 for idx, item in enumerate(results, 1):
2898 rev, data = item
2948 rev, data = item
2899 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2949 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2900 formatone(fm, data, title=title, displayall=displayall)
2950 formatone(fm, data, title=title, displayall=displayall)
2901
2951
2902 # sorts results by median time
2952 # sorts results by median time
2903 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2953 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2904 # list of (name, index) to display
2954 # list of (name, index) to display
2905 relevants = [
2955 relevants = [
2906 ("min", 0),
2956 ("min", 0),
2907 ("10%", resultcount * 10 // 100),
2957 ("10%", resultcount * 10 // 100),
2908 ("25%", resultcount * 25 // 100),
2958 ("25%", resultcount * 25 // 100),
2909 ("50%", resultcount * 70 // 100),
2959 ("50%", resultcount * 70 // 100),
2910 ("75%", resultcount * 75 // 100),
2960 ("75%", resultcount * 75 // 100),
2911 ("90%", resultcount * 90 // 100),
2961 ("90%", resultcount * 90 // 100),
2912 ("95%", resultcount * 95 // 100),
2962 ("95%", resultcount * 95 // 100),
2913 ("99%", resultcount * 99 // 100),
2963 ("99%", resultcount * 99 // 100),
2914 ("99.9%", resultcount * 999 // 1000),
2964 ("99.9%", resultcount * 999 // 1000),
2915 ("99.99%", resultcount * 9999 // 10000),
2965 ("99.99%", resultcount * 9999 // 10000),
2916 ("99.999%", resultcount * 99999 // 100000),
2966 ("99.999%", resultcount * 99999 // 100000),
2917 ("max", -1),
2967 ("max", -1),
2918 ]
2968 ]
2919 if not ui.quiet:
2969 if not ui.quiet:
2920 for name, idx in relevants:
2970 for name, idx in relevants:
2921 data = results[idx]
2971 data = results[idx]
2922 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2972 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2923 formatone(fm, data[1], title=title, displayall=displayall)
2973 formatone(fm, data[1], title=title, displayall=displayall)
2924
2974
2925 # XXX summing that many floats will not be very precise, we ignore this fact
2975 # XXX summing that many floats will not be very precise, we ignore this fact
2926 # for now
2976 # for now
2927 totaltime = []
2977 totaltime = []
2928 for item in allresults:
2978 for item in allresults:
2929 totaltime.append(
2979 totaltime.append(
2930 (
2980 (
2931 sum(x[1][0] for x in item),
2981 sum(x[1][0] for x in item),
2932 sum(x[1][1] for x in item),
2982 sum(x[1][1] for x in item),
2933 sum(x[1][2] for x in item),
2983 sum(x[1][2] for x in item),
2934 )
2984 )
2935 )
2985 )
2936 formatone(
2986 formatone(
2937 fm,
2987 fm,
2938 totaltime,
2988 totaltime,
2939 title="total time (%d revs)" % resultcount,
2989 title="total time (%d revs)" % resultcount,
2940 displayall=displayall,
2990 displayall=displayall,
2941 )
2991 )
2942 fm.end()
2992 fm.end()
2943
2993
2944
2994
2945 class _faketr:
2995 class _faketr:
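"""minimal transaction stub: the revlog write path used below only calls `add`"""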
2946 def add(s, x, y, z=None):
2996 def add(s, x, y, z=None):
2947 return None
2997 return None
2948
2998
2949
2999
2950 def _timeonewrite(
3000 def _timeonewrite(
2951 ui,
3001 ui,
2952 orig,
3002 orig,
2953 source,
3003 source,
2954 startrev,
3004 startrev,
2955 stoprev,
3005 stoprev,
2956 runidx=None,
3006 runidx=None,
2957 lazydeltabase=True,
3007 lazydeltabase=True,
2958 clearcaches=True,
3008 clearcaches=True,
2959 ):
3009 ):
2960 timings = []
3010 timings = []
2961 tr = _faketr()
3011 tr = _faketr()
2962 with _temprevlog(ui, orig, startrev) as dest:
3012 with _temprevlog(ui, orig, startrev) as dest:
2963 dest._lazydeltabase = lazydeltabase
3013 dest._lazydeltabase = lazydeltabase
2964 revs = list(orig.revs(startrev, stoprev))
3014 revs = list(orig.revs(startrev, stoprev))
2965 total = len(revs)
3015 total = len(revs)
2966 topic = 'adding'
3016 topic = 'adding'
2967 if runidx is not None:
3017 if runidx is not None:
2968 topic += ' (run #%d)' % runidx
3018 topic += ' (run #%d)' % runidx
2969 # Support both old and new progress API
3019 # Support both old and new progress API
2970 if util.safehasattr(ui, 'makeprogress'):
3020 if util.safehasattr(ui, 'makeprogress'):
2971 progress = ui.makeprogress(topic, unit='revs', total=total)
3021 progress = ui.makeprogress(topic, unit='revs', total=total)
2972
3022
2973 def updateprogress(pos):
3023 def updateprogress(pos):
2974 progress.update(pos)
3024 progress.update(pos)
2975
3025
2976 def completeprogress():
3026 def completeprogress():
2977 progress.complete()
3027 progress.complete()
2978
3028
2979 else:
3029 else:
2980
3030
2981 def updateprogress(pos):
3031 def updateprogress(pos):
2982 ui.progress(topic, pos, unit='revs', total=total)
3032 ui.progress(topic, pos, unit='revs', total=total)
2983
3033
2984 def completeprogress():
3034 def completeprogress():
2985 ui.progress(topic, None, unit='revs', total=total)
3035 ui.progress(topic, None, unit='revs', total=total)
2986
3036
2987 for idx, rev in enumerate(revs):
3037 for idx, rev in enumerate(revs):
2988 updateprogress(idx)
3038 updateprogress(idx)
2989 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3039 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2990 if clearcaches:
3040 if clearcaches:
2991 dest.index.clearcaches()
3041 dest.index.clearcaches()
2992 dest.clearcaches()
3042 dest.clearcaches()
2993 with timeone() as r:
3043 with timeone() as r:
2994 dest.addrawrevision(*addargs, **addkwargs)
3044 dest.addrawrevision(*addargs, **addkwargs)
2995 timings.append((rev, r[0]))
3045 timings.append((rev, r[0]))
2996 updateprogress(total)
3046 updateprogress(total)
2997 completeprogress()
3047 completeprogress()
2998 return timings
3048 return timings
2999
3049
3000
3050
3001 def _getrevisionseed(orig, rev, tr, source):
3051 def _getrevisionseed(orig, rev, tr, source):
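"""return the (args, kwargs) needed to re-add `rev` from `orig` through
addrawrevision(), using the delta strategy selected by `source`"""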
3002 from mercurial.node import nullid
3052 from mercurial.node import nullid
3003
3053
3004 linkrev = orig.linkrev(rev)
3054 linkrev = orig.linkrev(rev)
3005 node = orig.node(rev)
3055 node = orig.node(rev)
3006 p1, p2 = orig.parents(node)
3056 p1, p2 = orig.parents(node)
3007 flags = orig.flags(rev)
3057 flags = orig.flags(rev)
3008 cachedelta = None
3058 cachedelta = None
3009 text = None
3059 text = None
3010
3060
3011 if source == b'full':
3061 if source == b'full':
3012 text = orig.revision(rev)
3062 text = orig.revision(rev)
3013 elif source == b'parent-1':
3063 elif source == b'parent-1':
3014 baserev = orig.rev(p1)
3064 baserev = orig.rev(p1)
3015 cachedelta = (baserev, orig.revdiff(p1, rev))
3065 cachedelta = (baserev, orig.revdiff(p1, rev))
3016 elif source == b'parent-2':
3066 elif source == b'parent-2':
3017 parent = p2
3067 parent = p2
3018 if p2 == nullid:
3068 if p2 == nullid:
3019 parent = p1
3069 parent = p1
3020 baserev = orig.rev(parent)
3070 baserev = orig.rev(parent)
3021 cachedelta = (baserev, orig.revdiff(parent, rev))
3071 cachedelta = (baserev, orig.revdiff(parent, rev))
3022 elif source == b'parent-smallest':
3072 elif source == b'parent-smallest':
3023 p1diff = orig.revdiff(p1, rev)
3073 p1diff = orig.revdiff(p1, rev)
3024 parent = p1
3074 parent = p1
3025 diff = p1diff
3075 diff = p1diff
3026 if p2 != nullid:
3076 if p2 != nullid:
3027 p2diff = orig.revdiff(p2, rev)
3077 p2diff = orig.revdiff(p2, rev)
3028 if len(p1diff) > len(p2diff):
3078 if len(p1diff) > len(p2diff):
3029 parent = p2
3079 parent = p2
3030 diff = p2diff
3080 diff = p2diff
3031 baserev = orig.rev(parent)
3081 baserev = orig.rev(parent)
3032 cachedelta = (baserev, diff)
3082 cachedelta = (baserev, diff)
3033 elif source == b'storage':
3083 elif source == b'storage':
3034 baserev = orig.deltaparent(rev)
3084 baserev = orig.deltaparent(rev)
3035 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3085 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3036
3086
3037 return (
3087 return (
3038 (text, tr, linkrev, p1, p2),
3088 (text, tr, linkrev, p1, p2),
3039 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3089 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3040 )
3090 )
3041
3091
3042
3092
3043 @contextlib.contextmanager
3093 @contextlib.contextmanager
3044 def _temprevlog(ui, orig, truncaterev):
3094 def _temprevlog(ui, orig, truncaterev):
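"""context manager yielding a writable copy of `orig`, truncated to
`truncaterev` revisions and backed by files in a temporary directory"""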
3045 from mercurial import vfs as vfsmod
3095 from mercurial import vfs as vfsmod
3046
3096
3047 if orig._inline:
3097 if orig._inline:
3048 raise error.Abort('not supporting inline revlog (yet)')
3098 raise error.Abort('not supporting inline revlog (yet)')
3049 revlogkwargs = {}
3099 revlogkwargs = {}
3050 k = 'upperboundcomp'
3100 k = 'upperboundcomp'
3051 if util.safehasattr(orig, k):
3101 if util.safehasattr(orig, k):
3052 revlogkwargs[k] = getattr(orig, k)
3102 revlogkwargs[k] = getattr(orig, k)
3053
3103
3054 indexfile = getattr(orig, '_indexfile', None)
3104 indexfile = getattr(orig, '_indexfile', None)
3055 if indexfile is None:
3105 if indexfile is None:
3056 # compatibility with <= hg-5.8
3106 # compatibility with <= hg-5.8
3057 indexfile = getattr(orig, 'indexfile')
3107 indexfile = getattr(orig, 'indexfile')
3058 origindexpath = orig.opener.join(indexfile)
3108 origindexpath = orig.opener.join(indexfile)
3059
3109
3060 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3110 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3061 origdatapath = orig.opener.join(datafile)
3111 origdatapath = orig.opener.join(datafile)
3062 radix = b'revlog'
3112 radix = b'revlog'
3063 indexname = b'revlog.i'
3113 indexname = b'revlog.i'
3064 dataname = b'revlog.d'
3114 dataname = b'revlog.d'
3065
3115
3066 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3116 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3067 try:
3117 try:
3068 # copy the data file in a temporary directory
3118 # copy the data file in a temporary directory
3069 ui.debug('copying data in %s\n' % tmpdir)
3119 ui.debug('copying data in %s\n' % tmpdir)
3070 destindexpath = os.path.join(tmpdir, 'revlog.i')
3120 destindexpath = os.path.join(tmpdir, 'revlog.i')
3071 destdatapath = os.path.join(tmpdir, 'revlog.d')
3121 destdatapath = os.path.join(tmpdir, 'revlog.d')
3072 shutil.copyfile(origindexpath, destindexpath)
3122 shutil.copyfile(origindexpath, destindexpath)
3073 shutil.copyfile(origdatapath, destdatapath)
3123 shutil.copyfile(origdatapath, destdatapath)
3074
3124
3075 # remove the data we want to add again
3125 # remove the data we want to add again
3076 ui.debug('truncating data to be rewritten\n')
3126 ui.debug('truncating data to be rewritten\n')
3077 with open(destindexpath, 'ab') as index:
3127 with open(destindexpath, 'ab') as index:
3078 index.seek(0)
3128 index.seek(0)
3079 index.truncate(truncaterev * orig._io.size)
3129 index.truncate(truncaterev * orig._io.size)
3080 with open(destdatapath, 'ab') as data:
3130 with open(destdatapath, 'ab') as data:
3081 data.seek(0)
3131 data.seek(0)
3082 data.truncate(orig.start(truncaterev))
3132 data.truncate(orig.start(truncaterev))
3083
3133
3084 # instantiate a new revlog from the temporary copy
3134 # instantiate a new revlog from the temporary copy
3085 ui.debug('instantiating a new revlog from the temporary copy\n')
3135 ui.debug('instantiating a new revlog from the temporary copy\n')
3086 vfs = vfsmod.vfs(tmpdir)
3136 vfs = vfsmod.vfs(tmpdir)
3087 vfs.options = getattr(orig.opener, 'options', None)
3137 vfs.options = getattr(orig.opener, 'options', None)
3088
3138
3089 try:
3139 try:
3090 dest = revlog(vfs, radix=radix, **revlogkwargs)
3140 dest = revlog(vfs, radix=radix, **revlogkwargs)
3091 except TypeError:
3141 except TypeError:
3092 dest = revlog(
3142 dest = revlog(
3093 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3143 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3094 )
3144 )
3095 if dest._inline:
3145 if dest._inline:
3096 raise error.Abort('not supporting inline revlog (yet)')
3146 raise error.Abort('not supporting inline revlog (yet)')
3097 # make sure internals are initialized
3147 # make sure internals are initialized
3098 dest.revision(len(dest) - 1)
3148 dest.revision(len(dest) - 1)
3099 yield dest
3149 yield dest
3100 del dest, vfs
3150 del dest, vfs
3101 finally:
3151 finally:
3102 shutil.rmtree(tmpdir, True)
3152 shutil.rmtree(tmpdir, True)
3103
3153
3104
3154
3105 @command(
3155 @command(
3106 b'perf::revlogchunks|perfrevlogchunks',
3156 b'perf::revlogchunks|perfrevlogchunks',
3107 revlogopts
3157 revlogopts
3108 + formatteropts
3158 + formatteropts
3109 + [
3159 + [
3110 (b'e', b'engines', b'', b'compression engines to use'),
3160 (b'e', b'engines', b'', b'compression engines to use'),
3111 (b's', b'startrev', 0, b'revision to start at'),
3161 (b's', b'startrev', 0, b'revision to start at'),
3112 ],
3162 ],
3113 b'-c|-m|FILE',
3163 b'-c|-m|FILE',
3114 )
3164 )
3115 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3165 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3116 """Benchmark operations on revlog chunks.
3166 """Benchmark operations on revlog chunks.
3117
3167
3118 Logically, each revlog is a collection of fulltext revisions. However,
3168 Logically, each revlog is a collection of fulltext revisions. However,
3119 stored within each revlog are "chunks" of possibly compressed data. This
3169 stored within each revlog are "chunks" of possibly compressed data. This
3120 data needs to be read and decompressed or compressed and written.
3170 data needs to be read and decompressed or compressed and written.
3121
3171
3122 This command measures the time it takes to read+decompress and recompress
3172 This command measures the time it takes to read+decompress and recompress
3123 chunks in a revlog. It effectively isolates I/O and compression performance.
3173 chunks in a revlog. It effectively isolates I/O and compression performance.
3124 For measurements of higher-level operations like resolving revisions,
3174 For measurements of higher-level operations like resolving revisions,
3125 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3175 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3126 """
3176 """
3127 opts = _byteskwargs(opts)
3177 opts = _byteskwargs(opts)
3128
3178
3129 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3179 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3130
3180
3131 # _chunkraw was renamed to _getsegmentforrevs.
3181 # _chunkraw was renamed to _getsegmentforrevs.
3132 try:
3182 try:
3133 segmentforrevs = rl._getsegmentforrevs
3183 segmentforrevs = rl._getsegmentforrevs
3134 except AttributeError:
3184 except AttributeError:
3135 segmentforrevs = rl._chunkraw
3185 segmentforrevs = rl._chunkraw
3136
3186
3137 # Verify engines argument.
3187 # Verify engines argument.
3138 if engines:
3188 if engines:
3139 engines = {e.strip() for e in engines.split(b',')}
3189 engines = {e.strip() for e in engines.split(b',')}
3140 for engine in engines:
3190 for engine in engines:
3141 try:
3191 try:
3142 util.compressionengines[engine]
3192 util.compressionengines[engine]
3143 except KeyError:
3193 except KeyError:
3144 raise error.Abort(b'unknown compression engine: %s' % engine)
3194 raise error.Abort(b'unknown compression engine: %s' % engine)
3145 else:
3195 else:
3146 engines = []
3196 engines = []
3147 for e in util.compengines:
3197 for e in util.compengines:
3148 engine = util.compengines[e]
3198 engine = util.compengines[e]
3149 try:
3199 try:
3150 if engine.available():
3200 if engine.available():
3151 engine.revlogcompressor().compress(b'dummy')
3201 engine.revlogcompressor().compress(b'dummy')
3152 engines.append(e)
3202 engines.append(e)
3153 except NotImplementedError:
3203 except NotImplementedError:
3154 pass
3204 pass
3155
3205
3156 revs = list(rl.revs(startrev, len(rl) - 1))
3206 revs = list(rl.revs(startrev, len(rl) - 1))
3157
3207
3158 def rlfh(rl):
3208 def rlfh(rl):
3159 if rl._inline:
3209 if rl._inline:
3160 indexfile = getattr(rl, '_indexfile', None)
3210 indexfile = getattr(rl, '_indexfile', None)
3161 if indexfile is None:
3211 if indexfile is None:
3162 # compatibility with <= hg-5.8
3212 # compatibility with <= hg-5.8
3163 indexfile = getattr(rl, 'indexfile')
3213 indexfile = getattr(rl, 'indexfile')
3164 return getsvfs(repo)(indexfile)
3214 return getsvfs(repo)(indexfile)
3165 else:
3215 else:
3166 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3216 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3167 return getsvfs(repo)(datafile)
3217 return getsvfs(repo)(datafile)
3168
3218
3169 def doread():
3219 def doread():
3170 rl.clearcaches()
3220 rl.clearcaches()
3171 for rev in revs:
3221 for rev in revs:
3172 segmentforrevs(rev, rev)
3222 segmentforrevs(rev, rev)
3173
3223
3174 def doreadcachedfh():
3224 def doreadcachedfh():
3175 rl.clearcaches()
3225 rl.clearcaches()
3176 fh = rlfh(rl)
3226 fh = rlfh(rl)
3177 for rev in revs:
3227 for rev in revs:
3178 segmentforrevs(rev, rev, df=fh)
3228 segmentforrevs(rev, rev, df=fh)
3179
3229
3180 def doreadbatch():
3230 def doreadbatch():
3181 rl.clearcaches()
3231 rl.clearcaches()
3182 segmentforrevs(revs[0], revs[-1])
3232 segmentforrevs(revs[0], revs[-1])
3183
3233
3184 def doreadbatchcachedfh():
3234 def doreadbatchcachedfh():
3185 rl.clearcaches()
3235 rl.clearcaches()
3186 fh = rlfh(rl)
3236 fh = rlfh(rl)
3187 segmentforrevs(revs[0], revs[-1], df=fh)
3237 segmentforrevs(revs[0], revs[-1], df=fh)
3188
3238
3189 def dochunk():
3239 def dochunk():
3190 rl.clearcaches()
3240 rl.clearcaches()
3191 fh = rlfh(rl)
3241 fh = rlfh(rl)
3192 for rev in revs:
3242 for rev in revs:
3193 rl._chunk(rev, df=fh)
3243 rl._chunk(rev, df=fh)
3194
3244
3195 chunks = [None]
3245 chunks = [None]
3196
3246
3197 def dochunkbatch():
3247 def dochunkbatch():
3198 rl.clearcaches()
3248 rl.clearcaches()
3199 fh = rlfh(rl)
3249 fh = rlfh(rl)
3200 # Save chunks as a side-effect.
3250 # Save chunks as a side-effect.
3201 chunks[0] = rl._chunks(revs, df=fh)
3251 chunks[0] = rl._chunks(revs, df=fh)
3202
3252
3203 def docompress(compressor):
3253 def docompress(compressor):
3204 rl.clearcaches()
3254 rl.clearcaches()
3205
3255
3206 try:
3256 try:
3207 # Swap in the requested compression engine.
3257 # Swap in the requested compression engine.
3208 oldcompressor = rl._compressor
3258 oldcompressor = rl._compressor
3209 rl._compressor = compressor
3259 rl._compressor = compressor
3210 for chunk in chunks[0]:
3260 for chunk in chunks[0]:
3211 rl.compress(chunk)
3261 rl.compress(chunk)
3212 finally:
3262 finally:
3213 rl._compressor = oldcompressor
3263 rl._compressor = oldcompressor
3214
3264
3215 benches = [
3265 benches = [
3216 (lambda: doread(), b'read'),
3266 (lambda: doread(), b'read'),
3217 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3267 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3218 (lambda: doreadbatch(), b'read batch'),
3268 (lambda: doreadbatch(), b'read batch'),
3219 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3269 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3220 (lambda: dochunk(), b'chunk'),
3270 (lambda: dochunk(), b'chunk'),
3221 (lambda: dochunkbatch(), b'chunk batch'),
3271 (lambda: dochunkbatch(), b'chunk batch'),
3222 ]
3272 ]
3223
3273
3224 for engine in sorted(engines):
3274 for engine in sorted(engines):
3225 compressor = util.compengines[engine].revlogcompressor()
3275 compressor = util.compengines[engine].revlogcompressor()
3226 benches.append(
3276 benches.append(
3227 (
3277 (
3228 functools.partial(docompress, compressor),
3278 functools.partial(docompress, compressor),
3229 b'compress w/ %s' % engine,
3279 b'compress w/ %s' % engine,
3230 )
3280 )
3231 )
3281 )
3232
3282
3233 for fn, title in benches:
3283 for fn, title in benches:
3234 timer, fm = gettimer(ui, opts)
3284 timer, fm = gettimer(ui, opts)
3235 timer(fn, title=title)
3285 timer(fn, title=title)
3236 fm.end()
3286 fm.end()
3237
3287
3238
3288
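A hedged usage sketch for the command above; the start revision and engine names below are illustrative assumptions (not part of the patch), and zstd is only exercised when that compression engine is available:

  $ hg perf::revlogchunks -c --startrev 1000
  $ hg perf::revlogchunks -m --engines 'zlib,zstd'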
3239 @command(
3289 @command(
3240 b'perf::revlogrevision|perfrevlogrevision',
3290 b'perf::revlogrevision|perfrevlogrevision',
3241 revlogopts
3291 revlogopts
3242 + formatteropts
3292 + formatteropts
3243 + [(b'', b'cache', False, b'use caches instead of clearing')],
3293 + [(b'', b'cache', False, b'use caches instead of clearing')],
3244 b'-c|-m|FILE REV',
3294 b'-c|-m|FILE REV',
3245 )
3295 )
3246 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3296 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3247 """Benchmark obtaining a revlog revision.
3297 """Benchmark obtaining a revlog revision.
3248
3298
3249 Obtaining a revlog revision consists of roughly the following steps:
3299 Obtaining a revlog revision consists of roughly the following steps:
3250
3300
3251 1. Compute the delta chain
3301 1. Compute the delta chain
3252 2. Slice the delta chain if applicable
3302 2. Slice the delta chain if applicable
3253 3. Obtain the raw chunks for that delta chain
3303 3. Obtain the raw chunks for that delta chain
3254 4. Decompress each raw chunk
3304 4. Decompress each raw chunk
3255 5. Apply binary patches to obtain fulltext
3305 5. Apply binary patches to obtain fulltext
3256 6. Verify hash of fulltext
3306 6. Verify hash of fulltext
3257
3307
3258 This command measures the time spent in each of these phases.
3308 This command measures the time spent in each of these phases.
3259 """
3309 """
3260 opts = _byteskwargs(opts)
3310 opts = _byteskwargs(opts)
3261
3311
3262 if opts.get(b'changelog') or opts.get(b'manifest'):
3312 if opts.get(b'changelog') or opts.get(b'manifest'):
3263 file_, rev = None, file_
3313 file_, rev = None, file_
3264 elif rev is None:
3314 elif rev is None:
3265 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3315 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3266
3316
3267 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3317 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3268
3318
3269 # _chunkraw was renamed to _getsegmentforrevs.
3319 # _chunkraw was renamed to _getsegmentforrevs.
3270 try:
3320 try:
3271 segmentforrevs = r._getsegmentforrevs
3321 segmentforrevs = r._getsegmentforrevs
3272 except AttributeError:
3322 except AttributeError:
3273 segmentforrevs = r._chunkraw
3323 segmentforrevs = r._chunkraw
3274
3324
3275 node = r.lookup(rev)
3325 node = r.lookup(rev)
3276 rev = r.rev(node)
3326 rev = r.rev(node)
3277
3327
3278 def getrawchunks(data, chain):
3328 def getrawchunks(data, chain):
3279 start = r.start
3329 start = r.start
3280 length = r.length
3330 length = r.length
3281 inline = r._inline
3331 inline = r._inline
3282 try:
3332 try:
3283 iosize = r.index.entry_size
3333 iosize = r.index.entry_size
3284 except AttributeError:
3334 except AttributeError:
3285 iosize = r._io.size
3335 iosize = r._io.size
3286 buffer = util.buffer
3336 buffer = util.buffer
3287
3337
3288 chunks = []
3338 chunks = []
3289 ladd = chunks.append
3339 ladd = chunks.append
3290 for idx, item in enumerate(chain):
3340 for idx, item in enumerate(chain):
3291 offset = start(item[0])
3341 offset = start(item[0])
3292 bits = data[idx]
3342 bits = data[idx]
3293 for rev in item:
3343 for rev in item:
3294 chunkstart = start(rev)
3344 chunkstart = start(rev)
3295 if inline:
3345 if inline:
3296 chunkstart += (rev + 1) * iosize
3346 chunkstart += (rev + 1) * iosize
3297 chunklength = length(rev)
3347 chunklength = length(rev)
3298 ladd(buffer(bits, chunkstart - offset, chunklength))
3348 ladd(buffer(bits, chunkstart - offset, chunklength))
3299
3349
3300 return chunks
3350 return chunks
3301
3351
3302 def dodeltachain(rev):
3352 def dodeltachain(rev):
3303 if not cache:
3353 if not cache:
3304 r.clearcaches()
3354 r.clearcaches()
3305 r._deltachain(rev)
3355 r._deltachain(rev)
3306
3356
3307 def doread(chain):
3357 def doread(chain):
3308 if not cache:
3358 if not cache:
3309 r.clearcaches()
3359 r.clearcaches()
3310 for item in slicedchain:
3360 for item in slicedchain:
3311 segmentforrevs(item[0], item[-1])
3361 segmentforrevs(item[0], item[-1])
3312
3362
3313 def doslice(r, chain, size):
3363 def doslice(r, chain, size):
3314 for s in slicechunk(r, chain, targetsize=size):
3364 for s in slicechunk(r, chain, targetsize=size):
3315 pass
3365 pass
3316
3366
3317 def dorawchunks(data, chain):
3367 def dorawchunks(data, chain):
3318 if not cache:
3368 if not cache:
3319 r.clearcaches()
3369 r.clearcaches()
3320 getrawchunks(data, chain)
3370 getrawchunks(data, chain)
3321
3371
3322 def dodecompress(chunks):
3372 def dodecompress(chunks):
3323 decomp = r.decompress
3373 decomp = r.decompress
3324 for chunk in chunks:
3374 for chunk in chunks:
3325 decomp(chunk)
3375 decomp(chunk)
3326
3376
3327 def dopatch(text, bins):
3377 def dopatch(text, bins):
3328 if not cache:
3378 if not cache:
3329 r.clearcaches()
3379 r.clearcaches()
3330 mdiff.patches(text, bins)
3380 mdiff.patches(text, bins)
3331
3381
3332 def dohash(text):
3382 def dohash(text):
3333 if not cache:
3383 if not cache:
3334 r.clearcaches()
3384 r.clearcaches()
3335 r.checkhash(text, node, rev=rev)
3385 r.checkhash(text, node, rev=rev)
3336
3386
3337 def dorevision():
3387 def dorevision():
3338 if not cache:
3388 if not cache:
3339 r.clearcaches()
3389 r.clearcaches()
3340 r.revision(node)
3390 r.revision(node)
3341
3391
3342 try:
3392 try:
3343 from mercurial.revlogutils.deltas import slicechunk
3393 from mercurial.revlogutils.deltas import slicechunk
3344 except ImportError:
3394 except ImportError:
3345 slicechunk = getattr(revlog, '_slicechunk', None)
3395 slicechunk = getattr(revlog, '_slicechunk', None)
3346
3396
3347 size = r.length(rev)
3397 size = r.length(rev)
3348 chain = r._deltachain(rev)[0]
3398 chain = r._deltachain(rev)[0]
3349 if not getattr(r, '_withsparseread', False):
3399 if not getattr(r, '_withsparseread', False):
3350 slicedchain = (chain,)
3400 slicedchain = (chain,)
3351 else:
3401 else:
3352 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3402 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3353 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3403 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3354 rawchunks = getrawchunks(data, slicedchain)
3404 rawchunks = getrawchunks(data, slicedchain)
3355 bins = r._chunks(chain)
3405 bins = r._chunks(chain)
3356 text = bytes(bins[0])
3406 text = bytes(bins[0])
3357 bins = bins[1:]
3407 bins = bins[1:]
3358 text = mdiff.patches(text, bins)
3408 text = mdiff.patches(text, bins)
3359
3409
3360 benches = [
3410 benches = [
3361 (lambda: dorevision(), b'full'),
3411 (lambda: dorevision(), b'full'),
3362 (lambda: dodeltachain(rev), b'deltachain'),
3412 (lambda: dodeltachain(rev), b'deltachain'),
3363 (lambda: doread(chain), b'read'),
3413 (lambda: doread(chain), b'read'),
3364 ]
3414 ]
3365
3415
3366 if getattr(r, '_withsparseread', False):
3416 if getattr(r, '_withsparseread', False):
3367 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3417 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3368 benches.append(slicing)
3418 benches.append(slicing)
3369
3419
3370 benches.extend(
3420 benches.extend(
3371 [
3421 [
3372 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3422 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3373 (lambda: dodecompress(rawchunks), b'decompress'),
3423 (lambda: dodecompress(rawchunks), b'decompress'),
3374 (lambda: dopatch(text, bins), b'patch'),
3424 (lambda: dopatch(text, bins), b'patch'),
3375 (lambda: dohash(text), b'hash'),
3425 (lambda: dohash(text), b'hash'),
3376 ]
3426 ]
3377 )
3427 )
3378
3428
3379 timer, fm = gettimer(ui, opts)
3429 timer, fm = gettimer(ui, opts)
3380 for fn, title in benches:
3430 for fn, title in benches:
3381 timer(fn, title=title)
3431 timer(fn, title=title)
3382 fm.end()
3432 fm.end()
3383
3433
3384
3434
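A hedged example invocation; the revision numbers are arbitrary assumptions. One timing is reported per phase listed in the docstring (plus the full revision read and, when sparse-read is enabled, the slicing step):

  $ hg perf::revlogrevision -c 5000
  $ hg perf::revlogrevision --cache -m 100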
3385 @command(
3435 @command(
3386 b'perf::revset|perfrevset',
3436 b'perf::revset|perfrevset',
3387 [
3437 [
3388 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3438 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3389 (b'', b'contexts', False, b'obtain changectx for each revision'),
3439 (b'', b'contexts', False, b'obtain changectx for each revision'),
3390 ]
3440 ]
3391 + formatteropts,
3441 + formatteropts,
3392 b"REVSET",
3442 b"REVSET",
3393 )
3443 )
3394 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3444 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3395 """benchmark the execution time of a revset
3445 """benchmark the execution time of a revset
3396
3446
3397 Use the --clear option if you need to evaluate the impact of building the
3447 Use the --clear option if you need to evaluate the impact of building the
3398 volatile revision set caches on revset execution. The volatile caches hold
3448 volatile revision set caches on revset execution. The volatile caches hold
3399 filtered and obsolescence related data."""
3449 filtered and obsolescence related data."""
3400 opts = _byteskwargs(opts)
3450 opts = _byteskwargs(opts)
3401
3451
3402 timer, fm = gettimer(ui, opts)
3452 timer, fm = gettimer(ui, opts)
3403
3453
3404 def d():
3454 def d():
3405 if clear:
3455 if clear:
3406 repo.invalidatevolatilesets()
3456 repo.invalidatevolatilesets()
3407 if contexts:
3457 if contexts:
3408 for ctx in repo.set(expr):
3458 for ctx in repo.set(expr):
3409 pass
3459 pass
3410 else:
3460 else:
3411 for r in repo.revs(expr):
3461 for r in repo.revs(expr):
3412 pass
3462 pass
3413
3463
3414 timer(d)
3464 timer(d)
3415 fm.end()
3465 fm.end()
3416
3466
3417
3467
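A hedged example for the command above; the revset expressions are illustrative only:

  $ hg perf::revset 'heads(all())'
  $ hg perf::revset --clear --contexts 'draft() and not obsolete()'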
3418 @command(
3468 @command(
3419 b'perf::volatilesets|perfvolatilesets',
3469 b'perf::volatilesets|perfvolatilesets',
3420 [
3470 [
3421 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3471 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3422 ]
3472 ]
3423 + formatteropts,
3473 + formatteropts,
3424 )
3474 )
3425 def perfvolatilesets(ui, repo, *names, **opts):
3475 def perfvolatilesets(ui, repo, *names, **opts):
3426 """benchmark the computation of various volatile set
3476 """benchmark the computation of various volatile set
3427
3477
3428 Volatile set computes element related to filtering and obsolescence."""
3478 Volatile set computes element related to filtering and obsolescence."""
3429 opts = _byteskwargs(opts)
3479 opts = _byteskwargs(opts)
3430 timer, fm = gettimer(ui, opts)
3480 timer, fm = gettimer(ui, opts)
3431 repo = repo.unfiltered()
3481 repo = repo.unfiltered()
3432
3482
3433 def getobs(name):
3483 def getobs(name):
3434 def d():
3484 def d():
3435 repo.invalidatevolatilesets()
3485 repo.invalidatevolatilesets()
3436 if opts[b'clear_obsstore']:
3486 if opts[b'clear_obsstore']:
3437 clearfilecache(repo, b'obsstore')
3487 clearfilecache(repo, b'obsstore')
3438 obsolete.getrevs(repo, name)
3488 obsolete.getrevs(repo, name)
3439
3489
3440 return d
3490 return d
3441
3491
3442 allobs = sorted(obsolete.cachefuncs)
3492 allobs = sorted(obsolete.cachefuncs)
3443 if names:
3493 if names:
3444 allobs = [n for n in allobs if n in names]
3494 allobs = [n for n in allobs if n in names]
3445
3495
3446 for name in allobs:
3496 for name in allobs:
3447 timer(getobs(name), title=name)
3497 timer(getobs(name), title=name)
3448
3498
3449 def getfiltered(name):
3499 def getfiltered(name):
3450 def d():
3500 def d():
3451 repo.invalidatevolatilesets()
3501 repo.invalidatevolatilesets()
3452 if opts[b'clear_obsstore']:
3502 if opts[b'clear_obsstore']:
3453 clearfilecache(repo, b'obsstore')
3503 clearfilecache(repo, b'obsstore')
3454 repoview.filterrevs(repo, name)
3504 repoview.filterrevs(repo, name)
3455
3505
3456 return d
3506 return d
3457
3507
3458 allfilter = sorted(repoview.filtertable)
3508 allfilter = sorted(repoview.filtertable)
3459 if names:
3509 if names:
3460 allfilter = [n for n in allfilter if n in names]
3510 allfilter = [n for n in allfilter if n in names]
3461
3511
3462 for name in allfilter:
3512 for name in allfilter:
3463 timer(getfiltered(name), title=name)
3513 timer(getfiltered(name), title=name)
3464 fm.end()
3514 fm.end()
3465
3515
3466
3516
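A hedged example for the command above; the set names are illustrative and must match keys of obsolete.cachefuncs or repoview.filtertable:

  $ hg perf::volatilesets
  $ hg perf::volatilesets --clear-obsstore obsolete visible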
3467 @command(
3517 @command(
3468 b'perf::branchmap|perfbranchmap',
3518 b'perf::branchmap|perfbranchmap',
3469 [
3519 [
3470 (b'f', b'full', False, b'Includes build time of subset'),
3520 (b'f', b'full', False, b'Includes build time of subset'),
3471 (
3521 (
3472 b'',
3522 b'',
3473 b'clear-revbranch',
3523 b'clear-revbranch',
3474 False,
3524 False,
3475 b'purge the revbranch cache between computation',
3525 b'purge the revbranch cache between computation',
3476 ),
3526 ),
3477 ]
3527 ]
3478 + formatteropts,
3528 + formatteropts,
3479 )
3529 )
3480 def perfbranchmap(ui, repo, *filternames, **opts):
3530 def perfbranchmap(ui, repo, *filternames, **opts):
3481 """benchmark the update of a branchmap
3531 """benchmark the update of a branchmap
3482
3532
3483 This benchmarks the full repo.branchmap() call with read and write disabled
3533 This benchmarks the full repo.branchmap() call with read and write disabled
3484 """
3534 """
3485 opts = _byteskwargs(opts)
3535 opts = _byteskwargs(opts)
3486 full = opts.get(b"full", False)
3536 full = opts.get(b"full", False)
3487 clear_revbranch = opts.get(b"clear_revbranch", False)
3537 clear_revbranch = opts.get(b"clear_revbranch", False)
3488 timer, fm = gettimer(ui, opts)
3538 timer, fm = gettimer(ui, opts)
3489
3539
3490 def getbranchmap(filtername):
3540 def getbranchmap(filtername):
3491 """generate a benchmark function for the filtername"""
3541 """generate a benchmark function for the filtername"""
3492 if filtername is None:
3542 if filtername is None:
3493 view = repo
3543 view = repo
3494 else:
3544 else:
3495 view = repo.filtered(filtername)
3545 view = repo.filtered(filtername)
3496 if util.safehasattr(view._branchcaches, '_per_filter'):
3546 if util.safehasattr(view._branchcaches, '_per_filter'):
3497 filtered = view._branchcaches._per_filter
3547 filtered = view._branchcaches._per_filter
3498 else:
3548 else:
3499 # older versions
3549 # older versions
3500 filtered = view._branchcaches
3550 filtered = view._branchcaches
3501
3551
3502 def d():
3552 def d():
3503 if clear_revbranch:
3553 if clear_revbranch:
3504 repo.revbranchcache()._clear()
3554 repo.revbranchcache()._clear()
3505 if full:
3555 if full:
3506 view._branchcaches.clear()
3556 view._branchcaches.clear()
3507 else:
3557 else:
3508 filtered.pop(filtername, None)
3558 filtered.pop(filtername, None)
3509 view.branchmap()
3559 view.branchmap()
3510
3560
3511 return d
3561 return d
3512
3562
3513 # add filter in smaller subset to bigger subset
3563 # add filter in smaller subset to bigger subset
3514 possiblefilters = set(repoview.filtertable)
3564 possiblefilters = set(repoview.filtertable)
3515 if filternames:
3565 if filternames:
3516 possiblefilters &= set(filternames)
3566 possiblefilters &= set(filternames)
3517 subsettable = getbranchmapsubsettable()
3567 subsettable = getbranchmapsubsettable()
3518 allfilters = []
3568 allfilters = []
3519 while possiblefilters:
3569 while possiblefilters:
3520 for name in possiblefilters:
3570 for name in possiblefilters:
3521 subset = subsettable.get(name)
3571 subset = subsettable.get(name)
3522 if subset not in possiblefilters:
3572 if subset not in possiblefilters:
3523 break
3573 break
3524 else:
3574 else:
3525 assert False, b'subset cycle %s!' % possiblefilters
3575 assert False, b'subset cycle %s!' % possiblefilters
3526 allfilters.append(name)
3576 allfilters.append(name)
3527 possiblefilters.remove(name)
3577 possiblefilters.remove(name)
3528
3578
3529 # warm the cache
3579 # warm the cache
3530 if not full:
3580 if not full:
3531 for name in allfilters:
3581 for name in allfilters:
3532 repo.filtered(name).branchmap()
3582 repo.filtered(name).branchmap()
3533 if not filternames or b'unfiltered' in filternames:
3583 if not filternames or b'unfiltered' in filternames:
3534 # add unfiltered
3584 # add unfiltered
3535 allfilters.append(None)
3585 allfilters.append(None)
3536
3586
3537 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3587 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3538 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3588 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3539 branchcacheread.set(classmethod(lambda *args: None))
3589 branchcacheread.set(classmethod(lambda *args: None))
3540 else:
3590 else:
3541 # older versions
3591 # older versions
3542 branchcacheread = safeattrsetter(branchmap, b'read')
3592 branchcacheread = safeattrsetter(branchmap, b'read')
3543 branchcacheread.set(lambda *args: None)
3593 branchcacheread.set(lambda *args: None)
3544 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3594 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3545 branchcachewrite.set(lambda *args: None)
3595 branchcachewrite.set(lambda *args: None)
3546 try:
3596 try:
3547 for name in allfilters:
3597 for name in allfilters:
3548 printname = name
3598 printname = name
3549 if name is None:
3599 if name is None:
3550 printname = b'unfiltered'
3600 printname = b'unfiltered'
3551 timer(getbranchmap(name), title=printname)
3601 timer(getbranchmap(name), title=printname)
3552 finally:
3602 finally:
3553 branchcacheread.restore()
3603 branchcacheread.restore()
3554 branchcachewrite.restore()
3604 branchcachewrite.restore()
3555 fm.end()
3605 fm.end()
3556
3606
3557
3607
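A hedged example for the command above; the filter names are illustrative, and with no argument every known filter plus the unfiltered view is measured:

  $ hg perf::branchmap
  $ hg perf::branchmap --full visible served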
3558 @command(
3608 @command(
3559 b'perf::branchmapupdate|perfbranchmapupdate',
3609 b'perf::branchmapupdate|perfbranchmapupdate',
3560 [
3610 [
3561 (b'', b'base', [], b'subset of revision to start from'),
3611 (b'', b'base', [], b'subset of revision to start from'),
3562 (b'', b'target', [], b'subset of revision to end with'),
3612 (b'', b'target', [], b'subset of revision to end with'),
3563 (b'', b'clear-caches', False, b'clear cache between each run'),
3613 (b'', b'clear-caches', False, b'clear cache between each run'),
3564 ]
3614 ]
3565 + formatteropts,
3615 + formatteropts,
3566 )
3616 )
3567 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3617 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3568 """benchmark branchmap update from for <base> revs to <target> revs
3618 """benchmark branchmap update from for <base> revs to <target> revs
3569
3619
3570 If `--clear-caches` is passed, the following items will be reset before
3620 If `--clear-caches` is passed, the following items will be reset before
3571 each update:
3621 each update:
3572 * the changelog instance and associated indexes
3622 * the changelog instance and associated indexes
3573 * the rev-branch-cache instance
3623 * the rev-branch-cache instance
3574
3624
3575 Examples:
3625 Examples:
3576
3626
3577 # update for the one last revision
3627 # update for the one last revision
3578 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3628 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3579
3629
3580 # update for changes coming with a new branch
3630 # update for changes coming with a new branch
3581 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3631 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3582 """
3632 """
3583 from mercurial import branchmap
3633 from mercurial import branchmap
3584 from mercurial import repoview
3634 from mercurial import repoview
3585
3635
3586 opts = _byteskwargs(opts)
3636 opts = _byteskwargs(opts)
3587 timer, fm = gettimer(ui, opts)
3637 timer, fm = gettimer(ui, opts)
3588 clearcaches = opts[b'clear_caches']
3638 clearcaches = opts[b'clear_caches']
3589 unfi = repo.unfiltered()
3639 unfi = repo.unfiltered()
3590 x = [None] # used to pass data between closure
3640 x = [None] # used to pass data between closure
3591
3641
3592 # we use a `list` here to avoid possible side effect from smartset
3642 # we use a `list` here to avoid possible side effect from smartset
3593 baserevs = list(scmutil.revrange(repo, base))
3643 baserevs = list(scmutil.revrange(repo, base))
3594 targetrevs = list(scmutil.revrange(repo, target))
3644 targetrevs = list(scmutil.revrange(repo, target))
3595 if not baserevs:
3645 if not baserevs:
3596 raise error.Abort(b'no revisions selected for --base')
3646 raise error.Abort(b'no revisions selected for --base')
3597 if not targetrevs:
3647 if not targetrevs:
3598 raise error.Abort(b'no revisions selected for --target')
3648 raise error.Abort(b'no revisions selected for --target')
3599
3649
3600 # make sure the target branchmap also contains the one in the base
3650 # make sure the target branchmap also contains the one in the base
3601 targetrevs = list(set(baserevs) | set(targetrevs))
3651 targetrevs = list(set(baserevs) | set(targetrevs))
3602 targetrevs.sort()
3652 targetrevs.sort()
3603
3653
3604 cl = repo.changelog
3654 cl = repo.changelog
3605 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3655 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3606 allbaserevs.sort()
3656 allbaserevs.sort()
3607 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3657 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3608
3658
3609 newrevs = list(alltargetrevs.difference(allbaserevs))
3659 newrevs = list(alltargetrevs.difference(allbaserevs))
3610 newrevs.sort()
3660 newrevs.sort()
3611
3661
3612 allrevs = frozenset(unfi.changelog.revs())
3662 allrevs = frozenset(unfi.changelog.revs())
3613 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3663 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3614 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3664 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3615
3665
3616 def basefilter(repo, visibilityexceptions=None):
3666 def basefilter(repo, visibilityexceptions=None):
3617 return basefilterrevs
3667 return basefilterrevs
3618
3668
3619 def targetfilter(repo, visibilityexceptions=None):
3669 def targetfilter(repo, visibilityexceptions=None):
3620 return targetfilterrevs
3670 return targetfilterrevs
3621
3671
3622 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3672 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3623 ui.status(msg % (len(allbaserevs), len(newrevs)))
3673 ui.status(msg % (len(allbaserevs), len(newrevs)))
3624 if targetfilterrevs:
3674 if targetfilterrevs:
3625 msg = b'(%d revisions still filtered)\n'
3675 msg = b'(%d revisions still filtered)\n'
3626 ui.status(msg % len(targetfilterrevs))
3676 ui.status(msg % len(targetfilterrevs))
3627
3677
3628 try:
3678 try:
3629 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3679 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3630 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3680 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3631
3681
3632 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3682 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3633 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3683 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3634
3684
3635 # try to find an existing branchmap to reuse
3685 # try to find an existing branchmap to reuse
3636 subsettable = getbranchmapsubsettable()
3686 subsettable = getbranchmapsubsettable()
3637 candidatefilter = subsettable.get(None)
3687 candidatefilter = subsettable.get(None)
3638 while candidatefilter is not None:
3688 while candidatefilter is not None:
3639 candidatebm = repo.filtered(candidatefilter).branchmap()
3689 candidatebm = repo.filtered(candidatefilter).branchmap()
3640 if candidatebm.validfor(baserepo):
3690 if candidatebm.validfor(baserepo):
3641 filtered = repoview.filterrevs(repo, candidatefilter)
3691 filtered = repoview.filterrevs(repo, candidatefilter)
3642 missing = [r for r in allbaserevs if r in filtered]
3692 missing = [r for r in allbaserevs if r in filtered]
3643 base = candidatebm.copy()
3693 base = candidatebm.copy()
3644 base.update(baserepo, missing)
3694 base.update(baserepo, missing)
3645 break
3695 break
3646 candidatefilter = subsettable.get(candidatefilter)
3696 candidatefilter = subsettable.get(candidatefilter)
3647 else:
3697 else:
3648 # no suitable subset was found
3698 # no suitable subset was found
3649 base = branchmap.branchcache()
3699 base = branchmap.branchcache()
3650 base.update(baserepo, allbaserevs)
3700 base.update(baserepo, allbaserevs)
3651
3701
3652 def setup():
3702 def setup():
3653 x[0] = base.copy()
3703 x[0] = base.copy()
3654 if clearcaches:
3704 if clearcaches:
3655 unfi._revbranchcache = None
3705 unfi._revbranchcache = None
3656 clearchangelog(repo)
3706 clearchangelog(repo)
3657
3707
3658 def bench():
3708 def bench():
3659 x[0].update(targetrepo, newrevs)
3709 x[0].update(targetrepo, newrevs)
3660
3710
3661 timer(bench, setup=setup)
3711 timer(bench, setup=setup)
3662 fm.end()
3712 fm.end()
3663 finally:
3713 finally:
3664 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3714 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3665 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3715 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3666
3716
3667
3717
3668 @command(
3718 @command(
3669 b'perf::branchmapload|perfbranchmapload',
3719 b'perf::branchmapload|perfbranchmapload',
3670 [
3720 [
3671 (b'f', b'filter', b'', b'Specify repoview filter'),
3721 (b'f', b'filter', b'', b'Specify repoview filter'),
3672 (b'', b'list', False, b'List branchmap filter caches'),
3722 (b'', b'list', False, b'List branchmap filter caches'),
3673 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3723 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3674 ]
3724 ]
3675 + formatteropts,
3725 + formatteropts,
3676 )
3726 )
3677 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3727 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3678 """benchmark reading the branchmap"""
3728 """benchmark reading the branchmap"""
3679 opts = _byteskwargs(opts)
3729 opts = _byteskwargs(opts)
3680 clearrevlogs = opts[b'clear_revlogs']
3730 clearrevlogs = opts[b'clear_revlogs']
3681
3731
3682 if list:
3732 if list:
3683 for name, kind, st in repo.cachevfs.readdir(stat=True):
3733 for name, kind, st in repo.cachevfs.readdir(stat=True):
3684 if name.startswith(b'branch2'):
3734 if name.startswith(b'branch2'):
3685 filtername = name.partition(b'-')[2] or b'unfiltered'
3735 filtername = name.partition(b'-')[2] or b'unfiltered'
3686 ui.status(
3736 ui.status(
3687 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3737 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3688 )
3738 )
3689 return
3739 return
3690 if not filter:
3740 if not filter:
3691 filter = None
3741 filter = None
3692 subsettable = getbranchmapsubsettable()
3742 subsettable = getbranchmapsubsettable()
3693 if filter is None:
3743 if filter is None:
3694 repo = repo.unfiltered()
3744 repo = repo.unfiltered()
3695 else:
3745 else:
3696 repo = repoview.repoview(repo, filter)
3746 repo = repoview.repoview(repo, filter)
3697
3747
3698 repo.branchmap() # make sure we have a relevant, up to date branchmap
3748 repo.branchmap() # make sure we have a relevant, up to date branchmap
3699
3749
3700 try:
3750 try:
3701 fromfile = branchmap.branchcache.fromfile
3751 fromfile = branchmap.branchcache.fromfile
3702 except AttributeError:
3752 except AttributeError:
3703 # older versions
3753 # older versions
3704 fromfile = branchmap.read
3754 fromfile = branchmap.read
3705
3755
3706 currentfilter = filter
3756 currentfilter = filter
3707 # try once without timer, the filter may not be cached
3757 # try once without timer, the filter may not be cached
3708 while fromfile(repo) is None:
3758 while fromfile(repo) is None:
3709 currentfilter = subsettable.get(currentfilter)
3759 currentfilter = subsettable.get(currentfilter)
3710 if currentfilter is None:
3760 if currentfilter is None:
3711 raise error.Abort(
3761 raise error.Abort(
3712 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3762 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3713 )
3763 )
3714 repo = repo.filtered(currentfilter)
3764 repo = repo.filtered(currentfilter)
3715 timer, fm = gettimer(ui, opts)
3765 timer, fm = gettimer(ui, opts)
3716
3766
3717 def setup():
3767 def setup():
3718 if clearrevlogs:
3768 if clearrevlogs:
3719 clearchangelog(repo)
3769 clearchangelog(repo)
3720
3770
3721 def bench():
3771 def bench():
3722 fromfile(repo)
3772 fromfile(repo)
3723
3773
3724 timer(bench, setup=setup)
3774 timer(bench, setup=setup)
3725 fm.end()
3775 fm.end()
3726
3776
3727
3777
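A hedged example for the command above; it assumes a branchmap cache file already exists for the requested filter, otherwise the command aborts as described above:

  $ hg perf::branchmapload --list
  $ hg perf::branchmapload --filter served --clear-revlogs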
3728 @command(b'perf::loadmarkers|perfloadmarkers')
3778 @command(b'perf::loadmarkers|perfloadmarkers')
3729 def perfloadmarkers(ui, repo):
3779 def perfloadmarkers(ui, repo):
3730 """benchmark the time to parse the on-disk markers for a repo
3780 """benchmark the time to parse the on-disk markers for a repo
3731
3781
3732 Result is the number of markers in the repo."""
3782 Result is the number of markers in the repo."""
3733 timer, fm = gettimer(ui)
3783 timer, fm = gettimer(ui)
3734 svfs = getsvfs(repo)
3784 svfs = getsvfs(repo)
3735 timer(lambda: len(obsolete.obsstore(repo, svfs)))
3785 timer(lambda: len(obsolete.obsstore(repo, svfs)))
3736 fm.end()
3786 fm.end()
3737
3787
3738
3788
3739 @command(
3789 @command(
3740 b'perf::lrucachedict|perflrucachedict',
3790 b'perf::lrucachedict|perflrucachedict',
3741 formatteropts
3791 formatteropts
3742 + [
3792 + [
3743 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3793 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3744 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3794 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3745 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3795 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3746 (b'', b'size', 4, b'size of cache'),
3796 (b'', b'size', 4, b'size of cache'),
3747 (b'', b'gets', 10000, b'number of key lookups'),
3797 (b'', b'gets', 10000, b'number of key lookups'),
3748 (b'', b'sets', 10000, b'number of key sets'),
3798 (b'', b'sets', 10000, b'number of key sets'),
3749 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3799 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3750 (
3800 (
3751 b'',
3801 b'',
3752 b'mixedgetfreq',
3802 b'mixedgetfreq',
3753 50,
3803 50,
3754 b'frequency of get vs set ops in mixed mode',
3804 b'frequency of get vs set ops in mixed mode',
3755 ),
3805 ),
3756 ],
3806 ],
3757 norepo=True,
3807 norepo=True,
3758 )
3808 )
3759 def perflrucache(
3809 def perflrucache(
3760 ui,
3810 ui,
3761 mincost=0,
3811 mincost=0,
3762 maxcost=100,
3812 maxcost=100,
3763 costlimit=0,
3813 costlimit=0,
3764 size=4,
3814 size=4,
3765 gets=10000,
3815 gets=10000,
3766 sets=10000,
3816 sets=10000,
3767 mixed=10000,
3817 mixed=10000,
3768 mixedgetfreq=50,
3818 mixedgetfreq=50,
3769 **opts
3819 **opts
3770 ):
3820 ):
3771 opts = _byteskwargs(opts)
3821 opts = _byteskwargs(opts)
3772
3822
3773 def doinit():
3823 def doinit():
3774 for i in _xrange(10000):
3824 for i in _xrange(10000):
3775 util.lrucachedict(size)
3825 util.lrucachedict(size)
3776
3826
3777 costrange = list(range(mincost, maxcost + 1))
3827 costrange = list(range(mincost, maxcost + 1))
3778
3828
3779 values = []
3829 values = []
3780 for i in _xrange(size):
3830 for i in _xrange(size):
3781 values.append(random.randint(0, _maxint))
3831 values.append(random.randint(0, _maxint))
3782
3832
3783 # Get mode fills the cache and tests raw lookup performance with no
3833 # Get mode fills the cache and tests raw lookup performance with no
3784 # eviction.
3834 # eviction.
3785 getseq = []
3835 getseq = []
3786 for i in _xrange(gets):
3836 for i in _xrange(gets):
3787 getseq.append(random.choice(values))
3837 getseq.append(random.choice(values))
3788
3838
3789 def dogets():
3839 def dogets():
3790 d = util.lrucachedict(size)
3840 d = util.lrucachedict(size)
3791 for v in values:
3841 for v in values:
3792 d[v] = v
3842 d[v] = v
3793 for key in getseq:
3843 for key in getseq:
3794 value = d[key]
3844 value = d[key]
3795 value # silence pyflakes warning
3845 value # silence pyflakes warning
3796
3846
3797 def dogetscost():
3847 def dogetscost():
3798 d = util.lrucachedict(size, maxcost=costlimit)
3848 d = util.lrucachedict(size, maxcost=costlimit)
3799 for i, v in enumerate(values):
3849 for i, v in enumerate(values):
3800 d.insert(v, v, cost=costs[i])
3850 d.insert(v, v, cost=costs[i])
3801 for key in getseq:
3851 for key in getseq:
3802 try:
3852 try:
3803 value = d[key]
3853 value = d[key]
3804 value # silence pyflakes warning
3854 value # silence pyflakes warning
3805 except KeyError:
3855 except KeyError:
3806 pass
3856 pass
3807
3857
3808 # Set mode tests insertion speed with cache eviction.
3858 # Set mode tests insertion speed with cache eviction.
3809 setseq = []
3859 setseq = []
3810 costs = []
3860 costs = []
3811 for i in _xrange(sets):
3861 for i in _xrange(sets):
3812 setseq.append(random.randint(0, _maxint))
3862 setseq.append(random.randint(0, _maxint))
3813 costs.append(random.choice(costrange))
3863 costs.append(random.choice(costrange))
3814
3864
3815 def doinserts():
3865 def doinserts():
3816 d = util.lrucachedict(size)
3866 d = util.lrucachedict(size)
3817 for v in setseq:
3867 for v in setseq:
3818 d.insert(v, v)
3868 d.insert(v, v)
3819
3869
3820 def doinsertscost():
3870 def doinsertscost():
3821 d = util.lrucachedict(size, maxcost=costlimit)
3871 d = util.lrucachedict(size, maxcost=costlimit)
3822 for i, v in enumerate(setseq):
3872 for i, v in enumerate(setseq):
3823 d.insert(v, v, cost=costs[i])
3873 d.insert(v, v, cost=costs[i])
3824
3874
3825 def dosets():
3875 def dosets():
3826 d = util.lrucachedict(size)
3876 d = util.lrucachedict(size)
3827 for v in setseq:
3877 for v in setseq:
3828 d[v] = v
3878 d[v] = v
3829
3879
3830 # Mixed mode randomly performs gets and sets with eviction.
3880 # Mixed mode randomly performs gets and sets with eviction.
3831 mixedops = []
3881 mixedops = []
3832 for i in _xrange(mixed):
3882 for i in _xrange(mixed):
3833 r = random.randint(0, 100)
3883 r = random.randint(0, 100)
3834 if r < mixedgetfreq:
3884 if r < mixedgetfreq:
3835 op = 0
3885 op = 0
3836 else:
3886 else:
3837 op = 1
3887 op = 1
3838
3888
3839 mixedops.append(
3889 mixedops.append(
3840 (op, random.randint(0, size * 2), random.choice(costrange))
3890 (op, random.randint(0, size * 2), random.choice(costrange))
3841 )
3891 )
3842
3892
3843 def domixed():
3893 def domixed():
3844 d = util.lrucachedict(size)
3894 d = util.lrucachedict(size)
3845
3895
3846 for op, v, cost in mixedops:
3896 for op, v, cost in mixedops:
3847 if op == 0:
3897 if op == 0:
3848 try:
3898 try:
3849 d[v]
3899 d[v]
3850 except KeyError:
3900 except KeyError:
3851 pass
3901 pass
3852 else:
3902 else:
3853 d[v] = v
3903 d[v] = v
3854
3904
3855 def domixedcost():
3905 def domixedcost():
3856 d = util.lrucachedict(size, maxcost=costlimit)
3906 d = util.lrucachedict(size, maxcost=costlimit)
3857
3907
3858 for op, v, cost in mixedops:
3908 for op, v, cost in mixedops:
3859 if op == 0:
3909 if op == 0:
3860 try:
3910 try:
3861 d[v]
3911 d[v]
3862 except KeyError:
3912 except KeyError:
3863 pass
3913 pass
3864 else:
3914 else:
3865 d.insert(v, v, cost=cost)
3915 d.insert(v, v, cost=cost)
3866
3916
3867 benches = [
3917 benches = [
3868 (doinit, b'init'),
3918 (doinit, b'init'),
3869 ]
3919 ]
3870
3920
3871 if costlimit:
3921 if costlimit:
3872 benches.extend(
3922 benches.extend(
3873 [
3923 [
3874 (dogetscost, b'gets w/ cost limit'),
3924 (dogetscost, b'gets w/ cost limit'),
3875 (doinsertscost, b'inserts w/ cost limit'),
3925 (doinsertscost, b'inserts w/ cost limit'),
3876 (domixedcost, b'mixed w/ cost limit'),
3926 (domixedcost, b'mixed w/ cost limit'),
3877 ]
3927 ]
3878 )
3928 )
3879 else:
3929 else:
3880 benches.extend(
3930 benches.extend(
3881 [
3931 [
3882 (dogets, b'gets'),
3932 (dogets, b'gets'),
3883 (doinserts, b'inserts'),
3933 (doinserts, b'inserts'),
3884 (dosets, b'sets'),
3934 (dosets, b'sets'),
3885 (domixed, b'mixed'),
3935 (domixed, b'mixed'),
3886 ]
3936 ]
3887 )
3937 )
3888
3938
3889 for fn, title in benches:
3939 for fn, title in benches:
3890 timer, fm = gettimer(ui, opts)
3940 timer, fm = gettimer(ui, opts)
3891 timer(fn, title=title)
3941 timer(fn, title=title)
3892 fm.end()
3942 fm.end()
3893
3943
3894
3944
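A hedged example for the command above; the sizes, counts, and cost bounds are arbitrary, and the command needs no repository:

  $ hg perf::lrucachedict --size 4 --gets 10000 --sets 10000
  $ hg perf::lrucachedict --size 4 --costlimit 500 --mincost 1 --maxcost 100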
3895 @command(
3945 @command(
3896 b'perf::write|perfwrite',
3946 b'perf::write|perfwrite',
3897 formatteropts
3947 formatteropts
3898 + [
3948 + [
3899 (b'', b'write-method', b'write', b'ui write method'),
3949 (b'', b'write-method', b'write', b'ui write method'),
3900 (b'', b'nlines', 100, b'number of lines'),
3950 (b'', b'nlines', 100, b'number of lines'),
3901 (b'', b'nitems', 100, b'number of items (per line)'),
3951 (b'', b'nitems', 100, b'number of items (per line)'),
3902 (b'', b'item', b'x', b'item that is written'),
3952 (b'', b'item', b'x', b'item that is written'),
3903 (b'', b'batch-line', None, b'pass whole line to write method at once'),
3953 (b'', b'batch-line', None, b'pass whole line to write method at once'),
3904 (b'', b'flush-line', None, b'flush after each line'),
3954 (b'', b'flush-line', None, b'flush after each line'),
3905 ],
3955 ],
3906 )
3956 )
3907 def perfwrite(ui, repo, **opts):
3957 def perfwrite(ui, repo, **opts):
3908 """microbenchmark ui.write (and others)"""
3958 """microbenchmark ui.write (and others)"""
3909 opts = _byteskwargs(opts)
3959 opts = _byteskwargs(opts)
3910
3960
3911 write = getattr(ui, _sysstr(opts[b'write_method']))
3961 write = getattr(ui, _sysstr(opts[b'write_method']))
3912 nlines = int(opts[b'nlines'])
3962 nlines = int(opts[b'nlines'])
3913 nitems = int(opts[b'nitems'])
3963 nitems = int(opts[b'nitems'])
3914 item = opts[b'item']
3964 item = opts[b'item']
3915 batch_line = opts.get(b'batch_line')
3965 batch_line = opts.get(b'batch_line')
3916 flush_line = opts.get(b'flush_line')
3966 flush_line = opts.get(b'flush_line')
3917
3967
3918 if batch_line:
3968 if batch_line:
3919 line = item * nitems + b'\n'
3969 line = item * nitems + b'\n'
3920
3970
3921 def benchmark():
3971 def benchmark():
3922 for i in pycompat.xrange(nlines):
3972 for i in pycompat.xrange(nlines):
3923 if batch_line:
3973 if batch_line:
3924 write(line)
3974 write(line)
3925 else:
3975 else:
3926 for i in pycompat.xrange(nitems):
3976 for i in pycompat.xrange(nitems):
3927 write(item)
3977 write(item)
3928 write(b'\n')
3978 write(b'\n')
3929 if flush_line:
3979 if flush_line:
3930 ui.flush()
3980 ui.flush()
3931 ui.flush()
3981 ui.flush()
3932
3982
3933 timer, fm = gettimer(ui, opts)
3983 timer, fm = gettimer(ui, opts)
3934 timer(benchmark)
3984 timer(benchmark)
3935 fm.end()
3985 fm.end()
3936
3986
3937
3987
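A hedged example for the command above; the line and item counts are arbitrary:

  $ hg perf::write --nlines 1000 --nitems 50 --item x --batch-line
  $ hg perf::write --nlines 1000 --flush-line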
3938 def uisetup(ui):
3988 def uisetup(ui):
3939 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3989 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3940 commands, b'debugrevlogopts'
3990 commands, b'debugrevlogopts'
3941 ):
3991 ):
3942 # for "historical portability":
3992 # for "historical portability":
3943 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3993 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3944 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3994 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3945 # openrevlog() should cause failure, because it has been
3995 # openrevlog() should cause failure, because it has been
3946 # available since 3.5 (or 49c583ca48c4).
3996 # available since 3.5 (or 49c583ca48c4).
3947 def openrevlog(orig, repo, cmd, file_, opts):
3997 def openrevlog(orig, repo, cmd, file_, opts):
3948 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3998 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3949 raise error.Abort(
3999 raise error.Abort(
3950 b"This version doesn't support --dir option",
4000 b"This version doesn't support --dir option",
3951 hint=b"use 3.5 or later",
4001 hint=b"use 3.5 or later",
3952 )
4002 )
3953 return orig(repo, cmd, file_, opts)
4003 return orig(repo, cmd, file_, opts)
3954
4004
3955 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4005 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3956
4006
3957
4007
3958 @command(
4008 @command(
3959 b'perf::progress|perfprogress',
4009 b'perf::progress|perfprogress',
3960 formatteropts
4010 formatteropts
3961 + [
4011 + [
3962 (b'', b'topic', b'topic', b'topic for progress messages'),
4012 (b'', b'topic', b'topic', b'topic for progress messages'),
3963 (b'c', b'total', 1000000, b'total value we are progressing to'),
4013 (b'c', b'total', 1000000, b'total value we are progressing to'),
3964 ],
4014 ],
3965 norepo=True,
4015 norepo=True,
3966 )
4016 )
3967 def perfprogress(ui, topic=None, total=None, **opts):
4017 def perfprogress(ui, topic=None, total=None, **opts):
3968 """printing of progress bars"""
4018 """printing of progress bars"""
3969 opts = _byteskwargs(opts)
4019 opts = _byteskwargs(opts)
3970
4020
3971 timer, fm = gettimer(ui, opts)
4021 timer, fm = gettimer(ui, opts)
3972
4022
3973 def doprogress():
4023 def doprogress():
3974 with ui.makeprogress(topic, total=total) as progress:
4024 with ui.makeprogress(topic, total=total) as progress:
3975 for i in _xrange(total):
4025 for i in _xrange(total):
3976 progress.increment()
4026 progress.increment()
3977
4027
3978 timer(doprogress)
4028 timer(doprogress)
3979 fm.end()
4029 fm.end()
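A hedged example for the command above; the topic and total are arbitrary, and the command needs no repository:

  $ hg perf::progress --topic rewriting --total 100000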
@@ -1,424 +1,426 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
$ hg help -e perf
perf extension - helper extension to measure performance

Configurations
==============

"perf"
------

"all-timing"
When set, additional statistics will be reported for each benchmark: best,
worst, median average. If not set only the best timing is reported
(default: off).

"presleep"
number of second to wait before any group of runs (default: 1)

"pre-run"
number of run to perform before starting measurement.

"profile-benchmark"
Enable profiling for the benchmarked section. (The first iteration is
benchmarked)

"run-limits"
Control the number of runs each benchmark will perform. The option value
should be a list of '<time>-<numberofrun>' pairs. After each run the
conditions are considered in order with the following logic:

If benchmark has been running for <time> seconds, and we have performed
<numberofrun> iterations, stop the benchmark,

The default value is: '3.0-100, 10.0-3'

"stub"
When set, benchmarks will only be run once, useful for testing (default:
off)

list of commands:

perf::addremove
(no help text available)
perf::ancestors
(no help text available)
perf::ancestorset
(no help text available)
perf::annotate
(no help text available)
perf::bdiff benchmark a bdiff between revisions
perf::bookmarks
benchmark parsing bookmarks from disk to memory
perf::branchmap
benchmark the update of a branchmap
perf::branchmapload
benchmark reading the branchmap
perf::branchmapupdate
benchmark branchmap update from for <base> revs to <target>
revs
perf::bundle benchmark the creation of a bundle from a repository
perf::bundleread
Benchmark reading of bundle files.
perf::cca (no help text available)
perf::changegroupchangelog
Benchmark producing a changelog group for a changegroup.
perf::changeset
(no help text available)
perf::ctxfiles
(no help text available)
perf::diffwd Profile diff of working directory changes
perf::dirfoldmap
benchmap a 'dirstate._map.dirfoldmap.get()' request
perf::dirs (no help text available)
perf::dirstate
benchmap the time of various distate operations
perf::dirstatedirs
benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
perf::dirstatefoldmap
benchmap a 'dirstate._map.filefoldmap.get()' request
perf::dirstatewrite
benchmap the time it take to write a dirstate on disk
perf::discovery
benchmark discovery between local repo and the peer at given
path
perf::fncacheencode
(no help text available)
perf::fncacheload
(no help text available)
perf::fncachewrite
(no help text available)
perf::heads benchmark the computation of a changelog heads
perf::helper-mergecopies
find statistics about potential parameters for
'perfmergecopies'
perf::helper-pathcopies
find statistic about potential parameters for the
'perftracecopies'
perf::ignore benchmark operation related to computing ignore
perf::index benchmark index creation time followed by a lookup
perf::linelogedits
(no help text available)
perf::loadmarkers
benchmark the time to parse the on-disk markers for a repo
perf::log (no help text available)
perf::lookup (no help text available)
perf::lrucachedict
(no help text available)
perf::manifest
benchmark the time to read a manifest from disk and return a
usable
perf::mergecalculate
(no help text available)
perf::mergecopies
measure runtime of 'copies.mergecopies'
perf::moonwalk
benchmark walking the changelog backwards
perf::nodelookup
(no help text available)
perf::nodemap
benchmark the time necessary to look up revision from a cold
nodemap
perf::parents
benchmark the time necessary to fetch one changeset's parents.
perf::pathcopies
benchmark the copy tracing logic
perf::phases benchmark phasesets computation
perf::phasesremote
benchmark time needed to analyse phases of the remote server
perf::progress
printing of progress bars
perf::rawfiles
(no help text available)
perf::revlogchunks
Benchmark operations on revlog chunks.
perf::revlogindex
Benchmark operations against a revlog index.
perf::revlogrevision
Benchmark obtaining a revlog revision.
perf::revlogrevisions
Benchmark reading a series of revisions from a revlog.
perf::revlogwrite
Benchmark writing a series of revisions to a revlog.
perf::revrange
(no help text available)
perf::revset benchmark the execution time of a revset
perf::startup
(no help text available)
perf::status benchmark the performance of a single status call
perf::tags (no help text available)
perf::templating
test the rendering time of a given template
perf::unidiff
benchmark a unified diff between revisions
perf::volatilesets
benchmark the computation of various volatile set
perf::walk (no help text available)
perf::write microbenchmark ui.write (and others)

(use 'hg help -v perf' to show built-in aliases and global options)

$ hg help perfaddremove
hg perf::addremove

aliases: perfaddremove

(no help text available)

options:

-T --template TEMPLATE display with template

(some details hidden, use --verbose to show complete help)
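
As the help text shows, the legacy name is only an alias; the namespaced form can be invoked the same way, e.g. (illustrative only, not part of the recorded test):

$ hg perf::addremove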

$ hg perfaddremove
$ hg perfancestors
$ hg perfancestorset 2
$ hg perfannotate a
$ hg perfbdiff -c 1
$ hg perfbdiff --alldata 1
$ hg perfunidiff -c 1
$ hg perfunidiff --alldata 1
$ hg perfbookmarks
$ hg perfbranchmap
$ hg perfbranchmapload
$ hg perfbranchmapupdate --base "not tip" --target "tip"
benchmark of branchmap with 3 revisions with 1 new ones
$ hg perfcca
$ hg perfchangegroupchangelog
$ hg perfchangegroupchangelog --cgversion 01
$ hg perfchangeset 2
$ hg perfctxfiles 2
$ hg perfdiffwd
$ hg perfdirfoldmap
$ hg perfdirs
$ hg perfdirstate
$ hg perfdirstate --contains
$ hg perfdirstate --iteration
$ hg perfdirstatedirs
$ hg perfdirstatefoldmap
$ hg perfdirstatewrite
#if repofncache
$ hg perffncacheencode
$ hg perffncacheload
$ hg debugrebuildfncache
fncache already up to date
$ hg perffncachewrite
$ hg debugrebuildfncache
fncache already up to date
#endif
$ hg perfheads
$ hg perfignore
$ hg perfindex
$ hg perflinelogedits -n 1
$ hg perfloadmarkers
$ hg perflog
$ hg perflookup 2
$ hg perflrucache
$ hg perfmanifest 2
$ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
$ hg perfmanifest -m 44fe2c8352bb
abort: manifest revision must be integer or full node
[255]
$ hg perfmergecalculate -r 3
$ hg perfmoonwalk
$ hg perfnodelookup 2
$ hg perfpathcopies 1 2
$ hg perfprogress --total 1000
$ hg perfrawfiles 2
$ hg perfrevlogindex -c
#if reporevlogstore
$ hg perfrevlogrevisions .hg/store/data/a.i
#endif
$ hg perfrevlogrevision -m 0
$ hg perfrevlogchunks -c
$ hg perfrevrange
$ hg perfrevset 'all()'
$ hg perfstartup
$ hg perfstatus
$ hg perfstatus --dirstate
$ hg perftags
$ hg perftemplating
$ hg perfvolatilesets
$ hg perfwalk
$ hg perfparents
$ hg perfdiscovery -q .

Test run control
----------------

Simple single entry

$ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
! wall * comb * user * sys * (best of 15) (glob)

Multiple entries

$ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
! wall * comb * user * sys * (best of 5) (glob)

error cases are ignored

$ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
malformatted run limit entry, missing "-": 500
! wall * comb * user * sys * (best of 5) (glob)
$ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
! wall * comb * user * sys * (best of 5) (glob)
$ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
! wall * comb * user * sys * (best of 5) (glob)
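
The same limits could also be set once in a configuration file instead of being repeated with --config; a minimal sketch with arbitrarily chosen values:

[perf]
run-limits = 0.5-10, 5.0-3

With these values a benchmark stops after 10 runs once it has been running for 0.5 seconds, and after 3 runs once it has been running for 5 seconds.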

test actual output
------------------

normal output:

$ hg perfheads --config perf.stub=no
! wall * comb * user * sys * (best of *) (glob)

detailed output:

$ hg perfheads --config perf.all-timing=yes --config perf.stub=no
! wall * comb * user * sys * (best of *) (glob)
! wall * comb * user * sys * (max of *) (glob)
! wall * comb * user * sys * (avg of *) (glob)
! wall * comb * user * sys * (median of *) (glob)

test json output
----------------

normal output:

$ hg perfheads --template json --config perf.stub=no
[
{
"comb": *, (glob)
"count": *, (glob)
"sys": *, (glob)
"user": *, (glob)
"wall": * (glob)
}
]

detailed output:

$ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
[
{
"avg.comb": *, (glob)
"avg.count": *, (glob)
"avg.sys": *, (glob)
"avg.user": *, (glob)
"avg.wall": *, (glob)
"comb": *, (glob)
"count": *, (glob)
"max.comb": *, (glob)
"max.count": *, (glob)
"max.sys": *, (glob)
"max.user": *, (glob)
"max.wall": *, (glob)
"median.comb": *, (glob)
"median.count": *, (glob)
"median.sys": *, (glob)
"median.user": *, (glob)
"median.wall": *, (glob)
"sys": *, (glob)
"user": *, (glob)
"wall": * (glob)
}
]

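Because the json template emits machine-readable output, a single metric can be pulled out of it with a short pipeline; an illustrative sketch (assumes a python interpreter on PATH, not part of the recorded test):

$ hg perfheads --template json --config perf.stub=no |
>   python -c 'import json, sys; print(json.load(sys.stdin)[0]["wall"])'
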
Test pre-run feature
--------------------

(perf discovery has some spurious output)

$ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
! wall * comb * user * sys * (best of 1) (glob)
searching for changes
$ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
! wall * comb * user * sys * (best of 1) (glob)
searching for changes
searching for changes
$ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
! wall * comb * user * sys * (best of 1) (glob)
searching for changes
searching for changes
searching for changes
searching for changes
$ hg perf::bundle 'last(all(), 5)'
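
The new perf::bundle command takes a revset selecting the changesets to bundle, so other selections should work the same way; an illustrative (unrecorded) variant that bundles every revision in the repository:

$ hg perf::bundle 'all()'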

test profile-benchmark option
------------------------------

Function to check that statprof ran
$ statprofran () {
> egrep 'Sample count:|No samples recorded' > /dev/null
> }
$ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
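
profile-benchmark is not specific to perfdiscovery; the same option can presumably be combined with any perf command and checked with the helper above, e.g. (illustrative, not part of the recorded run):

$ hg perfheads --config perf.stub=no --config perf.profile-benchmark=yes 2>&1 | statprofran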

Check perf.py for historical portability
----------------------------------------

$ cd "$TESTDIR/.."

$ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
> testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
> "$TESTDIR"/check-perf-code.py contrib/perf.py
contrib/perf.py:\d+: (re)
> from mercurial import (
import newer module separately in try clause for early Mercurial
contrib/perf.py:\d+: (re)
> from mercurial import (
import newer module separately in try clause for early Mercurial
contrib/perf.py:\d+: (re)
> origindexpath = orig.opener.join(indexfile)
use getvfs()/getsvfs() for early Mercurial
contrib/perf.py:\d+: (re)
> origdatapath = orig.opener.join(datafile)
use getvfs()/getsvfs() for early Mercurial
contrib/perf.py:\d+: (re)
> vfs = vfsmod.vfs(tmpdir)
use getvfs()/getsvfs() for early Mercurial
contrib/perf.py:\d+: (re)
> vfs.options = getattr(orig.opener, 'options', None)
use getvfs()/getsvfs() for early Mercurial
[1]