locking: grab the wlock before touching the dirstate in `perfdirstatewrite`...
marmoute - r50903:e859f440 default

@@ -1,4239 +1,4240 @@
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If benchmark has been running for <time> seconds, and we have performed
      <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''

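# For reference, the knobs documented above are read from the `[perf]` section
# of an hgrc; a configuration enabling the richer statistics might look like
# this (values are illustrative, not the defaults):
#
#   [perf]
#   all-timing = yes
#   presleep = 0
#   run-limits = 5.0-50, 15.0-5
#
# The extension itself is usually loaded ad hoc, e.g.:
#
#   hg --config extensions.perf=contrib/perf.py help -e perf
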
# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time

import mercurial.revlog
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    profiling = None

try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, *args, **kwargs)


def identity(a):
    return a


try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()


def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")


if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator


try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )


def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len


class noop:
    """dummy context manager"""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


NOOPCTX = noop()


def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS
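    # For example, `run-limits = 5.0-50, 15.0-5` in the [perf] section would
    # give limits == [(5.0, 50), (15.0, 5)]: keep iterating until at least 5
    # seconds have elapsed and 50 runs were made, or 15 seconds and 5 runs
    # (see the stop-condition check in _timer() below).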

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm


def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()


@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
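    # r ends up holding a single (wall-clock, user-cpu, system-cpu) triple;
    # callers such as _timer() below read it back as item[0].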


# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
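# With these defaults a benchmark keeps running until either 3 seconds have
# elapsed and at least 100 iterations completed, or 10 seconds have elapsed
# and at least 3 iterations completed (whichever condition is met first).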


def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)


def formatone(fm, timings, title=None, result=None, displayall=False):
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)


# utilities for historical portability


def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )


def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
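# A typical use, as in gettimer() above, is to temporarily redirect an
# attribute and put it back afterwards, e.g. (sketch):
#
#   uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)     # point ui.fout at stderr
#       ...
#       uifout.restore()        # restore the original stream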


# utilities to examine each internal API changes


def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )


def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')


def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')


def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")


# utilities to clear cache


def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)


def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')


# perf commands

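# Each @command below registers a modern `perf::<name>` form plus a legacy
# `perf<name>` alias (the `|`-separated names are split by parsealiases), so
# once the extension is loaded they run like any other command, for example:
#
#   hg perf::heads
#   hg perf::status --dirstate
#   hg perf::tags --clear-revlogs
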
762 @command(b'perf::walk|perfwalk', formatteropts)
762 @command(b'perf::walk|perfwalk', formatteropts)
763 def perfwalk(ui, repo, *pats, **opts):
763 def perfwalk(ui, repo, *pats, **opts):
764 opts = _byteskwargs(opts)
764 opts = _byteskwargs(opts)
765 timer, fm = gettimer(ui, opts)
765 timer, fm = gettimer(ui, opts)
766 m = scmutil.match(repo[None], pats, {})
766 m = scmutil.match(repo[None], pats, {})
767 timer(
767 timer(
768 lambda: len(
768 lambda: len(
769 list(
769 list(
770 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
770 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
771 )
771 )
772 )
772 )
773 )
773 )
774 fm.end()
774 fm.end()
775
775
776
776
777 @command(b'perf::annotate|perfannotate', formatteropts)
777 @command(b'perf::annotate|perfannotate', formatteropts)
778 def perfannotate(ui, repo, f, **opts):
778 def perfannotate(ui, repo, f, **opts):
779 opts = _byteskwargs(opts)
779 opts = _byteskwargs(opts)
780 timer, fm = gettimer(ui, opts)
780 timer, fm = gettimer(ui, opts)
781 fc = repo[b'.'][f]
781 fc = repo[b'.'][f]
782 timer(lambda: len(fc.annotate(True)))
782 timer(lambda: len(fc.annotate(True)))
783 fm.end()
783 fm.end()
784
784
785
785
786 @command(
786 @command(
787 b'perf::status|perfstatus',
787 b'perf::status|perfstatus',
788 [
788 [
789 (b'u', b'unknown', False, b'ask status to look for unknown files'),
789 (b'u', b'unknown', False, b'ask status to look for unknown files'),
790 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
790 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
791 ]
791 ]
792 + formatteropts,
792 + formatteropts,
793 )
793 )
794 def perfstatus(ui, repo, **opts):
794 def perfstatus(ui, repo, **opts):
795 """benchmark the performance of a single status call
795 """benchmark the performance of a single status call
796
796
797 The repository data are preserved between each call.
797 The repository data are preserved between each call.
798
798
799 By default, only the status of the tracked file are requested. If
799 By default, only the status of the tracked file are requested. If
800 `--unknown` is passed, the "unknown" files are also tracked.
800 `--unknown` is passed, the "unknown" files are also tracked.
801 """
801 """
802 opts = _byteskwargs(opts)
802 opts = _byteskwargs(opts)
803 # m = match.always(repo.root, repo.getcwd())
803 # m = match.always(repo.root, repo.getcwd())
804 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
804 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
805 # False))))
805 # False))))
806 timer, fm = gettimer(ui, opts)
806 timer, fm = gettimer(ui, opts)
807 if opts[b'dirstate']:
807 if opts[b'dirstate']:
808 dirstate = repo.dirstate
808 dirstate = repo.dirstate
809 m = scmutil.matchall(repo)
809 m = scmutil.matchall(repo)
810 unknown = opts[b'unknown']
810 unknown = opts[b'unknown']
811
811
812 def status_dirstate():
812 def status_dirstate():
813 s = dirstate.status(
813 s = dirstate.status(
814 m, subrepos=[], ignored=False, clean=False, unknown=unknown
814 m, subrepos=[], ignored=False, clean=False, unknown=unknown
815 )
815 )
816 sum(map(bool, s))
816 sum(map(bool, s))
817
817
818 timer(status_dirstate)
818 timer(status_dirstate)
819 else:
819 else:
820 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
820 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
821 fm.end()
821 fm.end()
822
822
823
823
824 @command(b'perf::addremove|perfaddremove', formatteropts)
824 @command(b'perf::addremove|perfaddremove', formatteropts)
825 def perfaddremove(ui, repo, **opts):
825 def perfaddremove(ui, repo, **opts):
826 opts = _byteskwargs(opts)
826 opts = _byteskwargs(opts)
827 timer, fm = gettimer(ui, opts)
827 timer, fm = gettimer(ui, opts)
828 try:
828 try:
829 oldquiet = repo.ui.quiet
829 oldquiet = repo.ui.quiet
830 repo.ui.quiet = True
830 repo.ui.quiet = True
831 matcher = scmutil.match(repo[None])
831 matcher = scmutil.match(repo[None])
832 opts[b'dry_run'] = True
832 opts[b'dry_run'] = True
833 if 'uipathfn' in getargspec(scmutil.addremove).args:
833 if 'uipathfn' in getargspec(scmutil.addremove).args:
834 uipathfn = scmutil.getuipathfn(repo)
834 uipathfn = scmutil.getuipathfn(repo)
835 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
835 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
836 else:
836 else:
837 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
837 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
838 finally:
838 finally:
839 repo.ui.quiet = oldquiet
839 repo.ui.quiet = oldquiet
840 fm.end()
840 fm.end()
841
841
842
842
843 def clearcaches(cl):
843 def clearcaches(cl):
844 # behave somewhat consistently across internal API changes
844 # behave somewhat consistently across internal API changes
845 if util.safehasattr(cl, b'clearcaches'):
845 if util.safehasattr(cl, b'clearcaches'):
846 cl.clearcaches()
846 cl.clearcaches()
847 elif util.safehasattr(cl, b'_nodecache'):
847 elif util.safehasattr(cl, b'_nodecache'):
848 # <= hg-5.2
848 # <= hg-5.2
849 from mercurial.node import nullid, nullrev
849 from mercurial.node import nullid, nullrev
850
850
851 cl._nodecache = {nullid: nullrev}
851 cl._nodecache = {nullid: nullrev}
852 cl._nodepos = None
852 cl._nodepos = None
853
853
854
854
855 @command(b'perf::heads|perfheads', formatteropts)
855 @command(b'perf::heads|perfheads', formatteropts)
856 def perfheads(ui, repo, **opts):
856 def perfheads(ui, repo, **opts):
857 """benchmark the computation of a changelog heads"""
857 """benchmark the computation of a changelog heads"""
858 opts = _byteskwargs(opts)
858 opts = _byteskwargs(opts)
859 timer, fm = gettimer(ui, opts)
859 timer, fm = gettimer(ui, opts)
860 cl = repo.changelog
860 cl = repo.changelog
861
861
862 def s():
862 def s():
863 clearcaches(cl)
863 clearcaches(cl)
864
864
865 def d():
865 def d():
866 len(cl.headrevs())
866 len(cl.headrevs())
867
867
868 timer(d, setup=s)
868 timer(d, setup=s)
869 fm.end()
869 fm.end()
870
870
871
871
872 @command(
872 @command(
873 b'perf::tags|perftags',
873 b'perf::tags|perftags',
874 formatteropts
874 formatteropts
875 + [
875 + [
876 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
876 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
877 ],
877 ],
878 )
878 )
879 def perftags(ui, repo, **opts):
879 def perftags(ui, repo, **opts):
880 opts = _byteskwargs(opts)
880 opts = _byteskwargs(opts)
881 timer, fm = gettimer(ui, opts)
881 timer, fm = gettimer(ui, opts)
882 repocleartagscache = repocleartagscachefunc(repo)
882 repocleartagscache = repocleartagscachefunc(repo)
883 clearrevlogs = opts[b'clear_revlogs']
883 clearrevlogs = opts[b'clear_revlogs']
884
884
885 def s():
885 def s():
886 if clearrevlogs:
886 if clearrevlogs:
887 clearchangelog(repo)
887 clearchangelog(repo)
888 clearfilecache(repo.unfiltered(), 'manifest')
888 clearfilecache(repo.unfiltered(), 'manifest')
889 repocleartagscache()
889 repocleartagscache()
890
890
891 def t():
891 def t():
892 return len(repo.tags())
892 return len(repo.tags())
893
893
894 timer(t, setup=s)
894 timer(t, setup=s)
895 fm.end()
895 fm.end()
896
896
897
897
898 @command(b'perf::ancestors|perfancestors', formatteropts)
898 @command(b'perf::ancestors|perfancestors', formatteropts)
899 def perfancestors(ui, repo, **opts):
899 def perfancestors(ui, repo, **opts):
900 opts = _byteskwargs(opts)
900 opts = _byteskwargs(opts)
901 timer, fm = gettimer(ui, opts)
901 timer, fm = gettimer(ui, opts)
902 heads = repo.changelog.headrevs()
902 heads = repo.changelog.headrevs()
903
903
904 def d():
904 def d():
905 for a in repo.changelog.ancestors(heads):
905 for a in repo.changelog.ancestors(heads):
906 pass
906 pass
907
907
908 timer(d)
908 timer(d)
909 fm.end()
909 fm.end()
910
910
911
911
912 @command(b'perf::ancestorset|perfancestorset', formatteropts)
912 @command(b'perf::ancestorset|perfancestorset', formatteropts)
913 def perfancestorset(ui, repo, revset, **opts):
913 def perfancestorset(ui, repo, revset, **opts):
914 opts = _byteskwargs(opts)
914 opts = _byteskwargs(opts)
915 timer, fm = gettimer(ui, opts)
915 timer, fm = gettimer(ui, opts)
916 revs = repo.revs(revset)
916 revs = repo.revs(revset)
917 heads = repo.changelog.headrevs()
917 heads = repo.changelog.headrevs()
918
918
919 def d():
919 def d():
920 s = repo.changelog.ancestors(heads)
920 s = repo.changelog.ancestors(heads)
921 for rev in revs:
921 for rev in revs:
922 rev in s
922 rev in s
923
923
924 timer(d)
924 timer(d)
925 fm.end()
925 fm.end()
926
926
927
927
928 @command(
928 @command(
929 b'perf::delta-find',
929 b'perf::delta-find',
930 revlogopts + formatteropts,
930 revlogopts + formatteropts,
931 b'-c|-m|FILE REV',
931 b'-c|-m|FILE REV',
932 )
932 )
933 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
933 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
934 """benchmark the process of finding a valid delta for a revlog revision
934 """benchmark the process of finding a valid delta for a revlog revision
935
935
936 When a revlog receives a new revision (e.g. from a commit, or from an
936 When a revlog receives a new revision (e.g. from a commit, or from an
937 incoming bundle), it searches for a suitable delta-base to produce a delta.
937 incoming bundle), it searches for a suitable delta-base to produce a delta.
938 This perf command measures how much time we spend in this process. It
938 This perf command measures how much time we spend in this process. It
939 operates on an already stored revision.
939 operates on an already stored revision.
940
940
941 See `hg help debug-delta-find` for another related command.
941 See `hg help debug-delta-find` for another related command.
942 """
942 """
943 from mercurial import revlogutils
943 from mercurial import revlogutils
944 import mercurial.revlogutils.deltas as deltautil
944 import mercurial.revlogutils.deltas as deltautil
945
945
946 opts = _byteskwargs(opts)
946 opts = _byteskwargs(opts)
947 if arg_2 is None:
947 if arg_2 is None:
948 file_ = None
948 file_ = None
949 rev = arg_1
949 rev = arg_1
950 else:
950 else:
951 file_ = arg_1
951 file_ = arg_1
952 rev = arg_2
952 rev = arg_2
953
953
954 repo = repo.unfiltered()
954 repo = repo.unfiltered()
955
955
956 timer, fm = gettimer(ui, opts)
956 timer, fm = gettimer(ui, opts)
957
957
958 rev = int(rev)
958 rev = int(rev)
959
959
960 revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)
960 revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)
961
961
962 deltacomputer = deltautil.deltacomputer(revlog)
962 deltacomputer = deltautil.deltacomputer(revlog)
963
963
964 node = revlog.node(rev)
964 node = revlog.node(rev)
965 p1r, p2r = revlog.parentrevs(rev)
965 p1r, p2r = revlog.parentrevs(rev)
966 p1 = revlog.node(p1r)
966 p1 = revlog.node(p1r)
967 p2 = revlog.node(p2r)
967 p2 = revlog.node(p2r)
968 full_text = revlog.revision(rev)
968 full_text = revlog.revision(rev)
969 textlen = len(full_text)
969 textlen = len(full_text)
970 cachedelta = None
970 cachedelta = None
971 flags = revlog.flags(rev)
971 flags = revlog.flags(rev)
972
972
973 revinfo = revlogutils.revisioninfo(
973 revinfo = revlogutils.revisioninfo(
974 node,
974 node,
975 p1,
975 p1,
976 p2,
976 p2,
977 [full_text], # btext
977 [full_text], # btext
978 textlen,
978 textlen,
979 cachedelta,
979 cachedelta,
980 flags,
980 flags,
981 )
981 )
982
982
983 # Note: we should probably purge the potential caches (like the full
983 # Note: we should probably purge the potential caches (like the full
984 # manifest cache) between runs.
984 # manifest cache) between runs.
985 def find_one():
985 def find_one():
986 with revlog._datafp() as fh:
986 with revlog._datafp() as fh:
987 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
987 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
988
988
989 timer(find_one)
989 timer(find_one)
990 fm.end()
990 fm.end()
991
991
992
992
993 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
993 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
994 def perfdiscovery(ui, repo, path, **opts):
994 def perfdiscovery(ui, repo, path, **opts):
995 """benchmark discovery between local repo and the peer at given path"""
995 """benchmark discovery between local repo and the peer at given path"""
996 repos = [repo, None]
996 repos = [repo, None]
997 timer, fm = gettimer(ui, opts)
997 timer, fm = gettimer(ui, opts)
998
998
999 try:
999 try:
1000 from mercurial.utils.urlutil import get_unique_pull_path_obj
1000 from mercurial.utils.urlutil import get_unique_pull_path_obj
1001
1001
1002 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1002 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1003 except ImportError:
1003 except ImportError:
1004 try:
1004 try:
1005 from mercurial.utils.urlutil import get_unique_pull_path
1005 from mercurial.utils.urlutil import get_unique_pull_path
1006
1006
1007 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1007 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1008 except ImportError:
1008 except ImportError:
1009 path = ui.expandpath(path)
1009 path = ui.expandpath(path)
1010
1010
1011 def s():
1011 def s():
1012 repos[1] = hg.peer(ui, opts, path)
1012 repos[1] = hg.peer(ui, opts, path)
1013
1013
1014 def d():
1014 def d():
1015 setdiscovery.findcommonheads(ui, *repos)
1015 setdiscovery.findcommonheads(ui, *repos)
1016
1016
1017 timer(d, setup=s)
1017 timer(d, setup=s)
1018 fm.end()
1018 fm.end()
1019
1019
1020
1020
1021 @command(
1021 @command(
1022 b'perf::bookmarks|perfbookmarks',
1022 b'perf::bookmarks|perfbookmarks',
1023 formatteropts
1023 formatteropts
1024 + [
1024 + [
1025 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
1025 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
1026 ],
1026 ],
1027 )
1027 )
1028 def perfbookmarks(ui, repo, **opts):
1028 def perfbookmarks(ui, repo, **opts):
1029 """benchmark parsing bookmarks from disk to memory"""
1029 """benchmark parsing bookmarks from disk to memory"""
1030 opts = _byteskwargs(opts)
1030 opts = _byteskwargs(opts)
1031 timer, fm = gettimer(ui, opts)
1031 timer, fm = gettimer(ui, opts)
1032
1032
1033 clearrevlogs = opts[b'clear_revlogs']
1033 clearrevlogs = opts[b'clear_revlogs']
1034
1034
1035 def s():
1035 def s():
1036 if clearrevlogs:
1036 if clearrevlogs:
1037 clearchangelog(repo)
1037 clearchangelog(repo)
1038 clearfilecache(repo, b'_bookmarks')
1038 clearfilecache(repo, b'_bookmarks')
1039
1039
1040 def d():
1040 def d():
1041 repo._bookmarks
1041 repo._bookmarks
1042
1042
1043 timer(d, setup=s)
1043 timer(d, setup=s)
1044 fm.end()
1044 fm.end()
1045
1045
1046
1046
1047 @command(
1047 @command(
1048 b'perf::bundle',
1048 b'perf::bundle',
1049 [
1049 [
1050 (
1050 (
1051 b'r',
1051 b'r',
1052 b'rev',
1052 b'rev',
1053 [],
1053 [],
1054 b'changesets to bundle',
1054 b'changesets to bundle',
1055 b'REV',
1055 b'REV',
1056 ),
1056 ),
1057 (
1057 (
1058 b't',
1058 b't',
1059 b'type',
1059 b'type',
1060 b'none',
1060 b'none',
1061 b'bundlespec to use (see `hg help bundlespec`)',
1061 b'bundlespec to use (see `hg help bundlespec`)',
1062 b'TYPE',
1062 b'TYPE',
1063 ),
1063 ),
1064 ]
1064 ]
1065 + formatteropts,
1065 + formatteropts,
1066 b'REVS',
1066 b'REVS',
1067 )
1067 )
1068 def perfbundle(ui, repo, *revs, **opts):
1068 def perfbundle(ui, repo, *revs, **opts):
1069 """benchmark the creation of a bundle from a repository
1069 """benchmark the creation of a bundle from a repository
1070
1070
1071 For now, this only supports "none" compression.
1071 For now, this only supports "none" compression.
1072 """
1072 """
1073 try:
1073 try:
1074 from mercurial import bundlecaches
1074 from mercurial import bundlecaches
1075
1075
1076 parsebundlespec = bundlecaches.parsebundlespec
1076 parsebundlespec = bundlecaches.parsebundlespec
1077 except ImportError:
1077 except ImportError:
1078 from mercurial import exchange
1078 from mercurial import exchange
1079
1079
1080 parsebundlespec = exchange.parsebundlespec
1080 parsebundlespec = exchange.parsebundlespec
1081
1081
1082 from mercurial import discovery
1082 from mercurial import discovery
1083 from mercurial import bundle2
1083 from mercurial import bundle2
1084
1084
1085 opts = _byteskwargs(opts)
1085 opts = _byteskwargs(opts)
1086 timer, fm = gettimer(ui, opts)
1086 timer, fm = gettimer(ui, opts)
1087
1087
1088 cl = repo.changelog
1088 cl = repo.changelog
1089 revs = list(revs)
1089 revs = list(revs)
1090 revs.extend(opts.get(b'rev', ()))
1090 revs.extend(opts.get(b'rev', ()))
1091 revs = scmutil.revrange(repo, revs)
1091 revs = scmutil.revrange(repo, revs)
1092 if not revs:
1092 if not revs:
1093 raise error.Abort(b"no revision specified")
1093 raise error.Abort(b"no revision specified")
1094 # make it a consistent set (i.e. without topological gaps)
1094 # make it a consistent set (i.e. without topological gaps)
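# Illustrative example (hypothetical revision numbers): if the user passes
# --rev 2 --rev 5, the `%ld::%ld` DAG-range closure below also pulls in the
# intermediate revisions 3 and 4, so the bundled set has no topological gaps.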
1095 old_len = len(revs)
1095 old_len = len(revs)
1096 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1096 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1097 if old_len != len(revs):
1097 if old_len != len(revs):
1098 new_count = len(revs) - old_len
1098 new_count = len(revs) - old_len
1099 msg = b"add %d new revisions to make it a consistent set\n"
1099 msg = b"add %d new revisions to make it a consistent set\n"
1100 ui.write_err(msg % new_count)
1100 ui.write_err(msg % new_count)
1101
1101
1102 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1102 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1103 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1103 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1104 outgoing = discovery.outgoing(repo, bases, targets)
1104 outgoing = discovery.outgoing(repo, bases, targets)
1105
1105
1106 bundle_spec = opts.get(b'type')
1106 bundle_spec = opts.get(b'type')
1107
1107
1108 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1108 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1109
1109
1110 cgversion = bundle_spec.params.get(b"cg.version")
1110 cgversion = bundle_spec.params.get(b"cg.version")
1111 if cgversion is None:
1111 if cgversion is None:
1112 if bundle_spec.version == b'v1':
1112 if bundle_spec.version == b'v1':
1113 cgversion = b'01'
1113 cgversion = b'01'
1114 if bundle_spec.version == b'v2':
1114 if bundle_spec.version == b'v2':
1115 cgversion = b'02'
1115 cgversion = b'02'
1116 if cgversion not in changegroup.supportedoutgoingversions(repo):
1116 if cgversion not in changegroup.supportedoutgoingversions(repo):
1117 err = b"repository does not support bundle version %s"
1117 err = b"repository does not support bundle version %s"
1118 raise error.Abort(err % cgversion)
1118 raise error.Abort(err % cgversion)
1119
1119
1120 if cgversion == b'01': # bundle1
1120 if cgversion == b'01': # bundle1
1121 bversion = b'HG10' + bundle_spec.wirecompression
1121 bversion = b'HG10' + bundle_spec.wirecompression
1122 bcompression = None
1122 bcompression = None
1123 elif cgversion in (b'02', b'03'):
1123 elif cgversion in (b'02', b'03'):
1124 bversion = b'HG20'
1124 bversion = b'HG20'
1125 bcompression = bundle_spec.wirecompression
1125 bcompression = bundle_spec.wirecompression
1126 else:
1126 else:
1127 err = b'perf::bundle: unexpected changegroup version %s'
1127 err = b'perf::bundle: unexpected changegroup version %s'
1128 raise error.ProgrammingError(err % cgversion)
1128 raise error.ProgrammingError(err % cgversion)
1129
1129
1130 if bcompression is None:
1130 if bcompression is None:
1131 bcompression = b'UN'
1131 bcompression = b'UN'
1132
1132
1133 if bcompression != b'UN':
1133 if bcompression != b'UN':
1134 err = b'perf::bundle: compression currently unsupported: %s'
1134 err = b'perf::bundle: compression currently unsupported: %s'
1135 raise error.ProgrammingError(err % bcompression)
1135 raise error.ProgrammingError(err % bcompression)
1136
1136
1137 def do_bundle():
1137 def do_bundle():
1138 bundle2.writenewbundle(
1138 bundle2.writenewbundle(
1139 ui,
1139 ui,
1140 repo,
1140 repo,
1141 b'perf::bundle',
1141 b'perf::bundle',
1142 os.devnull,
1142 os.devnull,
1143 bversion,
1143 bversion,
1144 outgoing,
1144 outgoing,
1145 bundle_spec.params,
1145 bundle_spec.params,
1146 )
1146 )
1147
1147
1148 timer(do_bundle)
1148 timer(do_bundle)
1149 fm.end()
1149 fm.end()
1150
1150
1151
1151
1152 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1152 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1153 def perfbundleread(ui, repo, bundlepath, **opts):
1153 def perfbundleread(ui, repo, bundlepath, **opts):
1154 """Benchmark reading of bundle files.
1154 """Benchmark reading of bundle files.
1155
1155
1156 This command is meant to isolate the I/O part of bundle reading as
1156 This command is meant to isolate the I/O part of bundle reading as
1157 much as possible.
1157 much as possible.
1158 """
1158 """
1159 from mercurial import (
1159 from mercurial import (
1160 bundle2,
1160 bundle2,
1161 exchange,
1161 exchange,
1162 streamclone,
1162 streamclone,
1163 )
1163 )
1164
1164
1165 opts = _byteskwargs(opts)
1165 opts = _byteskwargs(opts)
1166
1166
1167 def makebench(fn):
1167 def makebench(fn):
1168 def run():
1168 def run():
1169 with open(bundlepath, b'rb') as fh:
1169 with open(bundlepath, b'rb') as fh:
1170 bundle = exchange.readbundle(ui, fh, bundlepath)
1170 bundle = exchange.readbundle(ui, fh, bundlepath)
1171 fn(bundle)
1171 fn(bundle)
1172
1172
1173 return run
1173 return run
1174
1174
1175 def makereadnbytes(size):
1175 def makereadnbytes(size):
1176 def run():
1176 def run():
1177 with open(bundlepath, b'rb') as fh:
1177 with open(bundlepath, b'rb') as fh:
1178 bundle = exchange.readbundle(ui, fh, bundlepath)
1178 bundle = exchange.readbundle(ui, fh, bundlepath)
1179 while bundle.read(size):
1179 while bundle.read(size):
1180 pass
1180 pass
1181
1181
1182 return run
1182 return run
1183
1183
1184 def makestdioread(size):
1184 def makestdioread(size):
1185 def run():
1185 def run():
1186 with open(bundlepath, b'rb') as fh:
1186 with open(bundlepath, b'rb') as fh:
1187 while fh.read(size):
1187 while fh.read(size):
1188 pass
1188 pass
1189
1189
1190 return run
1190 return run
1191
1191
1192 # bundle1
1192 # bundle1
1193
1193
1194 def deltaiter(bundle):
1194 def deltaiter(bundle):
1195 for delta in bundle.deltaiter():
1195 for delta in bundle.deltaiter():
1196 pass
1196 pass
1197
1197
1198 def iterchunks(bundle):
1198 def iterchunks(bundle):
1199 for chunk in bundle.getchunks():
1199 for chunk in bundle.getchunks():
1200 pass
1200 pass
1201
1201
1202 # bundle2
1202 # bundle2
1203
1203
1204 def forwardchunks(bundle):
1204 def forwardchunks(bundle):
1205 for chunk in bundle._forwardchunks():
1205 for chunk in bundle._forwardchunks():
1206 pass
1206 pass
1207
1207
1208 def iterparts(bundle):
1208 def iterparts(bundle):
1209 for part in bundle.iterparts():
1209 for part in bundle.iterparts():
1210 pass
1210 pass
1211
1211
1212 def iterpartsseekable(bundle):
1212 def iterpartsseekable(bundle):
1213 for part in bundle.iterparts(seekable=True):
1213 for part in bundle.iterparts(seekable=True):
1214 pass
1214 pass
1215
1215
1216 def seek(bundle):
1216 def seek(bundle):
1217 for part in bundle.iterparts(seekable=True):
1217 for part in bundle.iterparts(seekable=True):
1218 part.seek(0, os.SEEK_END)
1218 part.seek(0, os.SEEK_END)
1219
1219
1220 def makepartreadnbytes(size):
1220 def makepartreadnbytes(size):
1221 def run():
1221 def run():
1222 with open(bundlepath, b'rb') as fh:
1222 with open(bundlepath, b'rb') as fh:
1223 bundle = exchange.readbundle(ui, fh, bundlepath)
1223 bundle = exchange.readbundle(ui, fh, bundlepath)
1224 for part in bundle.iterparts():
1224 for part in bundle.iterparts():
1225 while part.read(size):
1225 while part.read(size):
1226 pass
1226 pass
1227
1227
1228 return run
1228 return run
1229
1229
1230 benches = [
1230 benches = [
1231 (makestdioread(8192), b'read(8k)'),
1231 (makestdioread(8192), b'read(8k)'),
1232 (makestdioread(16384), b'read(16k)'),
1232 (makestdioread(16384), b'read(16k)'),
1233 (makestdioread(32768), b'read(32k)'),
1233 (makestdioread(32768), b'read(32k)'),
1234 (makestdioread(131072), b'read(128k)'),
1234 (makestdioread(131072), b'read(128k)'),
1235 ]
1235 ]
1236
1236
1237 with open(bundlepath, b'rb') as fh:
1237 with open(bundlepath, b'rb') as fh:
1238 bundle = exchange.readbundle(ui, fh, bundlepath)
1238 bundle = exchange.readbundle(ui, fh, bundlepath)
1239
1239
1240 if isinstance(bundle, changegroup.cg1unpacker):
1240 if isinstance(bundle, changegroup.cg1unpacker):
1241 benches.extend(
1241 benches.extend(
1242 [
1242 [
1243 (makebench(deltaiter), b'cg1 deltaiter()'),
1243 (makebench(deltaiter), b'cg1 deltaiter()'),
1244 (makebench(iterchunks), b'cg1 getchunks()'),
1244 (makebench(iterchunks), b'cg1 getchunks()'),
1245 (makereadnbytes(8192), b'cg1 read(8k)'),
1245 (makereadnbytes(8192), b'cg1 read(8k)'),
1246 (makereadnbytes(16384), b'cg1 read(16k)'),
1246 (makereadnbytes(16384), b'cg1 read(16k)'),
1247 (makereadnbytes(32768), b'cg1 read(32k)'),
1247 (makereadnbytes(32768), b'cg1 read(32k)'),
1248 (makereadnbytes(131072), b'cg1 read(128k)'),
1248 (makereadnbytes(131072), b'cg1 read(128k)'),
1249 ]
1249 ]
1250 )
1250 )
1251 elif isinstance(bundle, bundle2.unbundle20):
1251 elif isinstance(bundle, bundle2.unbundle20):
1252 benches.extend(
1252 benches.extend(
1253 [
1253 [
1254 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1254 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1255 (makebench(iterparts), b'bundle2 iterparts()'),
1255 (makebench(iterparts), b'bundle2 iterparts()'),
1256 (
1256 (
1257 makebench(iterpartsseekable),
1257 makebench(iterpartsseekable),
1258 b'bundle2 iterparts() seekable',
1258 b'bundle2 iterparts() seekable',
1259 ),
1259 ),
1260 (makebench(seek), b'bundle2 part seek()'),
1260 (makebench(seek), b'bundle2 part seek()'),
1261 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1261 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1262 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1262 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1263 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1263 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1264 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1264 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1265 ]
1265 ]
1266 )
1266 )
1267 elif isinstance(bundle, streamclone.streamcloneapplier):
1267 elif isinstance(bundle, streamclone.streamcloneapplier):
1268 raise error.Abort(b'stream clone bundles not supported')
1268 raise error.Abort(b'stream clone bundles not supported')
1269 else:
1269 else:
1270 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1270 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1271
1271
1272 for fn, title in benches:
1272 for fn, title in benches:
1273 timer, fm = gettimer(ui, opts)
1273 timer, fm = gettimer(ui, opts)
1274 timer(fn, title=title)
1274 timer(fn, title=title)
1275 fm.end()
1275 fm.end()
1276
1276
1277
1277
1278 @command(
1278 @command(
1279 b'perf::changegroupchangelog|perfchangegroupchangelog',
1279 b'perf::changegroupchangelog|perfchangegroupchangelog',
1280 formatteropts
1280 formatteropts
1281 + [
1281 + [
1282 (b'', b'cgversion', b'02', b'changegroup version'),
1282 (b'', b'cgversion', b'02', b'changegroup version'),
1283 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1283 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1284 ],
1284 ],
1285 )
1285 )
1286 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1286 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1287 """Benchmark producing a changelog group for a changegroup.
1287 """Benchmark producing a changelog group for a changegroup.
1288
1288
1289 This measures the time spent processing the changelog during a
1289 This measures the time spent processing the changelog during a
1290 bundle operation. This occurs during `hg bundle` and on a server
1290 bundle operation. This occurs during `hg bundle` and on a server
1291 processing a `getbundle` wire protocol request (handles clones
1291 processing a `getbundle` wire protocol request (handles clones
1292 and pull requests).
1292 and pull requests).
1293
1293
1294 By default, all revisions are added to the changegroup.
1294 By default, all revisions are added to the changegroup.
1295 """
1295 """
1296 opts = _byteskwargs(opts)
1296 opts = _byteskwargs(opts)
1297 cl = repo.changelog
1297 cl = repo.changelog
1298 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1298 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1299 bundler = changegroup.getbundler(cgversion, repo)
1299 bundler = changegroup.getbundler(cgversion, repo)
1300
1300
1301 def d():
1301 def d():
1302 state, chunks = bundler._generatechangelog(cl, nodes)
1302 state, chunks = bundler._generatechangelog(cl, nodes)
1303 for chunk in chunks:
1303 for chunk in chunks:
1304 pass
1304 pass
1305
1305
1306 timer, fm = gettimer(ui, opts)
1306 timer, fm = gettimer(ui, opts)
1307
1307
1308 # Terminal printing can interfere with timing. So disable it.
1308 # Terminal printing can interfere with timing. So disable it.
1309 with ui.configoverride({(b'progress', b'disable'): True}):
1309 with ui.configoverride({(b'progress', b'disable'): True}):
1310 timer(d)
1310 timer(d)
1311
1311
1312 fm.end()
1312 fm.end()
1313
1313
1314
1314
1315 @command(b'perf::dirs|perfdirs', formatteropts)
1315 @command(b'perf::dirs|perfdirs', formatteropts)
1316 def perfdirs(ui, repo, **opts):
1316 def perfdirs(ui, repo, **opts):
1317 opts = _byteskwargs(opts)
1317 opts = _byteskwargs(opts)
1318 timer, fm = gettimer(ui, opts)
1318 timer, fm = gettimer(ui, opts)
1319 dirstate = repo.dirstate
1319 dirstate = repo.dirstate
1320 b'a' in dirstate
1320 b'a' in dirstate
1321
1321
1322 def d():
1322 def d():
1323 dirstate.hasdir(b'a')
1323 dirstate.hasdir(b'a')
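# Each timed run drops the cached `_dirs` structure so that hasdir() has to
# rebuild it; the AttributeError guard covers dirstate maps where the cache
# attribute does not exist (or has not been populated yet).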
1324 try:
1324 try:
1325 del dirstate._map._dirs
1325 del dirstate._map._dirs
1326 except AttributeError:
1326 except AttributeError:
1327 pass
1327 pass
1328
1328
1329 timer(d)
1329 timer(d)
1330 fm.end()
1330 fm.end()
1331
1331
1332
1332
1333 @command(
1333 @command(
1334 b'perf::dirstate|perfdirstate',
1334 b'perf::dirstate|perfdirstate',
1335 [
1335 [
1336 (
1336 (
1337 b'',
1337 b'',
1338 b'iteration',
1338 b'iteration',
1339 None,
1339 None,
1340 b'benchmark a full iteration for the dirstate',
1340 b'benchmark a full iteration for the dirstate',
1341 ),
1341 ),
1342 (
1342 (
1343 b'',
1343 b'',
1344 b'contains',
1344 b'contains',
1345 None,
1345 None,
1346 b'benchmark a large amount of `nf in dirstate` calls',
1346 b'benchmark a large amount of `nf in dirstate` calls',
1347 ),
1347 ),
1348 ]
1348 ]
1349 + formatteropts,
1349 + formatteropts,
1350 )
1350 )
1351 def perfdirstate(ui, repo, **opts):
1351 def perfdirstate(ui, repo, **opts):
1352 """benchmark the time of various dirstate operations
1352 """benchmark the time of various dirstate operations
1353
1353
1354 By default benchmark the time necessary to load a dirstate from scratch.
1354 By default benchmark the time necessary to load a dirstate from scratch.
1355 The dirstate is loaded to the point where a "contains" request can be
1355 The dirstate is loaded to the point where a "contains" request can be
1356 answered.
1356 answered.
1357 """
1357 """
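# Hedged usage sketch (illustrative invocations of this command):
#
# hg perf::dirstate # cold load, up to answering a "contains" request
# hg perf::dirstate --iteration # full iteration over the tracked files
# hg perf::dirstate --contains # many `f in dirstate` membership checks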
1358 opts = _byteskwargs(opts)
1358 opts = _byteskwargs(opts)
1359 timer, fm = gettimer(ui, opts)
1359 timer, fm = gettimer(ui, opts)
1360 b"a" in repo.dirstate
1360 b"a" in repo.dirstate
1361
1361
1362 if opts[b'iteration'] and opts[b'contains']:
1362 if opts[b'iteration'] and opts[b'contains']:
1363 msg = b'only specify one of --iteration or --contains'
1363 msg = b'only specify one of --iteration or --contains'
1364 raise error.Abort(msg)
1364 raise error.Abort(msg)
1365
1365
1366 if opts[b'iteration']:
1366 if opts[b'iteration']:
1367 setup = None
1367 setup = None
1368 dirstate = repo.dirstate
1368 dirstate = repo.dirstate
1369
1369
1370 def d():
1370 def d():
1371 for f in dirstate:
1371 for f in dirstate:
1372 pass
1372 pass
1373
1373
1374 elif opts[b'contains']:
1374 elif opts[b'contains']:
1375 setup = None
1375 setup = None
1376 dirstate = repo.dirstate
1376 dirstate = repo.dirstate
1377 allfiles = list(dirstate)
1377 allfiles = list(dirstate)
1378 # also add file paths that will be "missing" from the dirstate
1378 # also add file paths that will be "missing" from the dirstate
1379 allfiles.extend([f[::-1] for f in allfiles])
1379 allfiles.extend([f[::-1] for f in allfiles])
1380
1380
1381 def d():
1381 def d():
1382 for f in allfiles:
1382 for f in allfiles:
1383 f in dirstate
1383 f in dirstate
1384
1384
1385 else:
1385 else:
1386
1386
1387 def setup():
1387 def setup():
1388 repo.dirstate.invalidate()
1388 repo.dirstate.invalidate()
1389
1389
1390 def d():
1390 def d():
1391 b"a" in repo.dirstate
1391 b"a" in repo.dirstate
1392
1392
1393 timer(d, setup=setup)
1393 timer(d, setup=setup)
1394 fm.end()
1394 fm.end()
1395
1395
1396
1396
1397 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1397 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1398 def perfdirstatedirs(ui, repo, **opts):
1398 def perfdirstatedirs(ui, repo, **opts):
1399 """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
1399 """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
1400 opts = _byteskwargs(opts)
1400 opts = _byteskwargs(opts)
1401 timer, fm = gettimer(ui, opts)
1401 timer, fm = gettimer(ui, opts)
1402 repo.dirstate.hasdir(b"a")
1402 repo.dirstate.hasdir(b"a")
1403
1403
1404 def setup():
1404 def setup():
1405 try:
1405 try:
1406 del repo.dirstate._map._dirs
1406 del repo.dirstate._map._dirs
1407 except AttributeError:
1407 except AttributeError:
1408 pass
1408 pass
1409
1409
1410 def d():
1410 def d():
1411 repo.dirstate.hasdir(b"a")
1411 repo.dirstate.hasdir(b"a")
1412
1412
1413 timer(d, setup=setup)
1413 timer(d, setup=setup)
1414 fm.end()
1414 fm.end()
1415
1415
1416
1416
1417 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1417 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1418 def perfdirstatefoldmap(ui, repo, **opts):
1418 def perfdirstatefoldmap(ui, repo, **opts):
1419 """benchmark a `dirstate._map.filefoldmap.get()` request
1419 """benchmark a `dirstate._map.filefoldmap.get()` request
1420
1420
1421 The dirstate filefoldmap cache is dropped between every request.
1421 The dirstate filefoldmap cache is dropped between every request.
1422 """
1422 """
1423 opts = _byteskwargs(opts)
1423 opts = _byteskwargs(opts)
1424 timer, fm = gettimer(ui, opts)
1424 timer, fm = gettimer(ui, opts)
1425 dirstate = repo.dirstate
1425 dirstate = repo.dirstate
1426 dirstate._map.filefoldmap.get(b'a')
1426 dirstate._map.filefoldmap.get(b'a')
1427
1427
1428 def setup():
1428 def setup():
1429 del dirstate._map.filefoldmap
1429 del dirstate._map.filefoldmap
1430
1430
1431 def d():
1431 def d():
1432 dirstate._map.filefoldmap.get(b'a')
1432 dirstate._map.filefoldmap.get(b'a')
1433
1433
1434 timer(d, setup=setup)
1434 timer(d, setup=setup)
1435 fm.end()
1435 fm.end()
1436
1436
1437
1437
1438 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1438 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1439 def perfdirfoldmap(ui, repo, **opts):
1439 def perfdirfoldmap(ui, repo, **opts):
1440 """benchmark a `dirstate._map.dirfoldmap.get()` request
1440 """benchmark a `dirstate._map.dirfoldmap.get()` request
1441
1441
1442 The dirstate dirfoldmap cache is dropped between every request.
1442 The dirstate dirfoldmap cache is dropped between every request.
1443 """
1443 """
1444 opts = _byteskwargs(opts)
1444 opts = _byteskwargs(opts)
1445 timer, fm = gettimer(ui, opts)
1445 timer, fm = gettimer(ui, opts)
1446 dirstate = repo.dirstate
1446 dirstate = repo.dirstate
1447 dirstate._map.dirfoldmap.get(b'a')
1447 dirstate._map.dirfoldmap.get(b'a')
1448
1448
1449 def setup():
1449 def setup():
1450 del dirstate._map.dirfoldmap
1450 del dirstate._map.dirfoldmap
1451 try:
1451 try:
1452 del dirstate._map._dirs
1452 del dirstate._map._dirs
1453 except AttributeError:
1453 except AttributeError:
1454 pass
1454 pass
1455
1455
1456 def d():
1456 def d():
1457 dirstate._map.dirfoldmap.get(b'a')
1457 dirstate._map.dirfoldmap.get(b'a')
1458
1458
1459 timer(d, setup=setup)
1459 timer(d, setup=setup)
1460 fm.end()
1460 fm.end()
1461
1461
1462
1462
1463 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1463 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1464 def perfdirstatewrite(ui, repo, **opts):
1464 def perfdirstatewrite(ui, repo, **opts):
1465 """benchmark the time it takes to write a dirstate on disk"""
1465 """benchmark the time it takes to write a dirstate on disk"""
1466 opts = _byteskwargs(opts)
1466 opts = _byteskwargs(opts)
1467 timer, fm = gettimer(ui, opts)
1467 timer, fm = gettimer(ui, opts)
1468 ds = repo.dirstate
1468 ds = repo.dirstate
1469 b"a" in ds
1469 b"a" in ds
1470
1470
1471 def setup():
1471 def setup():
1472 ds._dirty = True
1472 ds._dirty = True
1473
1473
1474 def d():
1474 def d():
1475 ds.write(repo.currenttransaction())
1475 ds.write(repo.currenttransaction())
1476
1476
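# The timed runs below are wrapped in the working-copy lock, since writing
# the dirstate is expected to happen with the wlock held.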
1477 timer(d, setup=setup)
1477 with repo.wlock():
1478 timer(d, setup=setup)
1478 fm.end()
1479 fm.end()
1479
1480
1480
1481
1481 def _getmergerevs(repo, opts):
1482 def _getmergerevs(repo, opts):
1482 """parse command arguments to return the revisions involved in the merge
1483 """parse command arguments to return the revisions involved in the merge
1483
1484
1484 input: options dictionary with `rev`, `from` and `base`
1485 input: options dictionary with `rev`, `from` and `base`
1485 output: (localctx, otherctx, basectx)
1486 output: (localctx, otherctx, basectx)
1486 """
1487 """
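# Minimal usage sketch (hypothetical option values, mirroring the defaults of
# the perf::merge* commands below):
#
# opts = {b'rev': b'.', b'from': b'', b'base': b''}
# wctx, rctx, ancestor = _getmergerevs(repo, opts)
# # wctx: local side, rctx: revision merged in, ancestor: common base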
1487 if opts[b'from']:
1488 if opts[b'from']:
1488 fromrev = scmutil.revsingle(repo, opts[b'from'])
1489 fromrev = scmutil.revsingle(repo, opts[b'from'])
1489 wctx = repo[fromrev]
1490 wctx = repo[fromrev]
1490 else:
1491 else:
1491 wctx = repo[None]
1492 wctx = repo[None]
1492 # we don't want working dir files to be stat'd in the benchmark, so
1493 # we don't want working dir files to be stat'd in the benchmark, so
1493 # prime that cache
1494 # prime that cache
1494 wctx.dirty()
1495 wctx.dirty()
1495 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1496 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1496 if opts[b'base']:
1497 if opts[b'base']:
1497 fromrev = scmutil.revsingle(repo, opts[b'base'])
1498 fromrev = scmutil.revsingle(repo, opts[b'base'])
1498 ancestor = repo[fromrev]
1499 ancestor = repo[fromrev]
1499 else:
1500 else:
1500 ancestor = wctx.ancestor(rctx)
1501 ancestor = wctx.ancestor(rctx)
1501 return (wctx, rctx, ancestor)
1502 return (wctx, rctx, ancestor)
1502
1503
1503
1504
1504 @command(
1505 @command(
1505 b'perf::mergecalculate|perfmergecalculate',
1506 b'perf::mergecalculate|perfmergecalculate',
1506 [
1507 [
1507 (b'r', b'rev', b'.', b'rev to merge against'),
1508 (b'r', b'rev', b'.', b'rev to merge against'),
1508 (b'', b'from', b'', b'rev to merge from'),
1509 (b'', b'from', b'', b'rev to merge from'),
1509 (b'', b'base', b'', b'the revision to use as base'),
1510 (b'', b'base', b'', b'the revision to use as base'),
1510 ]
1511 ]
1511 + formatteropts,
1512 + formatteropts,
1512 )
1513 )
1513 def perfmergecalculate(ui, repo, **opts):
1514 def perfmergecalculate(ui, repo, **opts):
1514 opts = _byteskwargs(opts)
1515 opts = _byteskwargs(opts)
1515 timer, fm = gettimer(ui, opts)
1516 timer, fm = gettimer(ui, opts)
1516
1517
1517 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1518 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1518
1519
1519 def d():
1520 def d():
1520 # acceptremote is True because we don't want prompts in the middle of
1521 # acceptremote is True because we don't want prompts in the middle of
1521 # our benchmark
1522 # our benchmark
1522 merge.calculateupdates(
1523 merge.calculateupdates(
1523 repo,
1524 repo,
1524 wctx,
1525 wctx,
1525 rctx,
1526 rctx,
1526 [ancestor],
1527 [ancestor],
1527 branchmerge=False,
1528 branchmerge=False,
1528 force=False,
1529 force=False,
1529 acceptremote=True,
1530 acceptremote=True,
1530 followcopies=True,
1531 followcopies=True,
1531 )
1532 )
1532
1533
1533 timer(d)
1534 timer(d)
1534 fm.end()
1535 fm.end()
1535
1536
1536
1537
1537 @command(
1538 @command(
1538 b'perf::mergecopies|perfmergecopies',
1539 b'perf::mergecopies|perfmergecopies',
1539 [
1540 [
1540 (b'r', b'rev', b'.', b'rev to merge against'),
1541 (b'r', b'rev', b'.', b'rev to merge against'),
1541 (b'', b'from', b'', b'rev to merge from'),
1542 (b'', b'from', b'', b'rev to merge from'),
1542 (b'', b'base', b'', b'the revision to use as base'),
1543 (b'', b'base', b'', b'the revision to use as base'),
1543 ]
1544 ]
1544 + formatteropts,
1545 + formatteropts,
1545 )
1546 )
1546 def perfmergecopies(ui, repo, **opts):
1547 def perfmergecopies(ui, repo, **opts):
1547 """measure runtime of `copies.mergecopies`"""
1548 """measure runtime of `copies.mergecopies`"""
1548 opts = _byteskwargs(opts)
1549 opts = _byteskwargs(opts)
1549 timer, fm = gettimer(ui, opts)
1550 timer, fm = gettimer(ui, opts)
1550 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1551 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1551
1552
1552 def d():
1553 def d():
1553 # acceptremote is True because we don't want prompts in the middle of
1554 # acceptremote is True because we don't want prompts in the middle of
1554 # our benchmark
1555 # our benchmark
1555 copies.mergecopies(repo, wctx, rctx, ancestor)
1556 copies.mergecopies(repo, wctx, rctx, ancestor)
1556
1557
1557 timer(d)
1558 timer(d)
1558 fm.end()
1559 fm.end()
1559
1560
1560
1561
1561 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1562 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1562 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1563 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1563 """benchmark the copy tracing logic"""
1564 """benchmark the copy tracing logic"""
1564 opts = _byteskwargs(opts)
1565 opts = _byteskwargs(opts)
1565 timer, fm = gettimer(ui, opts)
1566 timer, fm = gettimer(ui, opts)
1566 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1567 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1567 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1568 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1568
1569
1569 def d():
1570 def d():
1570 copies.pathcopies(ctx1, ctx2)
1571 copies.pathcopies(ctx1, ctx2)
1571
1572
1572 timer(d)
1573 timer(d)
1573 fm.end()
1574 fm.end()
1574
1575
1575
1576
1576 @command(
1577 @command(
1577 b'perf::phases|perfphases',
1578 b'perf::phases|perfphases',
1578 [
1579 [
1579 (b'', b'full', False, b'include file reading time too'),
1580 (b'', b'full', False, b'include file reading time too'),
1580 ],
1581 ],
1581 b"",
1582 b"",
1582 )
1583 )
1583 def perfphases(ui, repo, **opts):
1584 def perfphases(ui, repo, **opts):
1584 """benchmark phasesets computation"""
1585 """benchmark phasesets computation"""
1585 opts = _byteskwargs(opts)
1586 opts = _byteskwargs(opts)
1586 timer, fm = gettimer(ui, opts)
1587 timer, fm = gettimer(ui, opts)
1587 _phases = repo._phasecache
1588 _phases = repo._phasecache
1588 full = opts.get(b'full')
1589 full = opts.get(b'full')
1589
1590
1590 def d():
1591 def d():
1591 phases = _phases
1592 phases = _phases
1592 if full:
1593 if full:
1593 clearfilecache(repo, b'_phasecache')
1594 clearfilecache(repo, b'_phasecache')
1594 phases = repo._phasecache
1595 phases = repo._phasecache
1595 phases.invalidate()
1596 phases.invalidate()
1596 phases.loadphaserevs(repo)
1597 phases.loadphaserevs(repo)
1597
1598
1598 timer(d)
1599 timer(d)
1599 fm.end()
1600 fm.end()
1600
1601
1601
1602
1602 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1603 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1603 def perfphasesremote(ui, repo, dest=None, **opts):
1604 def perfphasesremote(ui, repo, dest=None, **opts):
1604 """benchmark time needed to analyse phases of the remote server"""
1605 """benchmark time needed to analyse phases of the remote server"""
1605 from mercurial.node import bin
1606 from mercurial.node import bin
1606 from mercurial import (
1607 from mercurial import (
1607 exchange,
1608 exchange,
1608 hg,
1609 hg,
1609 phases,
1610 phases,
1610 )
1611 )
1611
1612
1612 opts = _byteskwargs(opts)
1613 opts = _byteskwargs(opts)
1613 timer, fm = gettimer(ui, opts)
1614 timer, fm = gettimer(ui, opts)
1614
1615
1615 path = ui.getpath(dest, default=(b'default-push', b'default'))
1616 path = ui.getpath(dest, default=(b'default-push', b'default'))
1616 if not path:
1617 if not path:
1617 raise error.Abort(
1618 raise error.Abort(
1618 b'default repository not configured!',
1619 b'default repository not configured!',
1619 hint=b"see 'hg help config.paths'",
1620 hint=b"see 'hg help config.paths'",
1620 )
1621 )
1621 if util.safehasattr(path, 'main_path'):
1622 if util.safehasattr(path, 'main_path'):
1622 path = path.get_push_variant()
1623 path = path.get_push_variant()
1623 dest = path.loc
1624 dest = path.loc
1624 else:
1625 else:
1625 dest = path.pushloc or path.loc
1626 dest = path.pushloc or path.loc
1626 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1627 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1627 other = hg.peer(repo, opts, dest)
1628 other = hg.peer(repo, opts, dest)
1628
1629
1629 # easier to perform discovery through the operation
1630 # easier to perform discovery through the operation
1630 op = exchange.pushoperation(repo, other)
1631 op = exchange.pushoperation(repo, other)
1631 exchange._pushdiscoverychangeset(op)
1632 exchange._pushdiscoverychangeset(op)
1632
1633
1633 remotesubset = op.fallbackheads
1634 remotesubset = op.fallbackheads
1634
1635
1635 with other.commandexecutor() as e:
1636 with other.commandexecutor() as e:
1636 remotephases = e.callcommand(
1637 remotephases = e.callcommand(
1637 b'listkeys', {b'namespace': b'phases'}
1638 b'listkeys', {b'namespace': b'phases'}
1638 ).result()
1639 ).result()
1639 del other
1640 del other
1640 publishing = remotephases.get(b'publishing', False)
1641 publishing = remotephases.get(b'publishing', False)
1641 if publishing:
1642 if publishing:
1642 ui.statusnoi18n(b'publishing: yes\n')
1643 ui.statusnoi18n(b'publishing: yes\n')
1643 else:
1644 else:
1644 ui.statusnoi18n(b'publishing: no\n')
1645 ui.statusnoi18n(b'publishing: no\n')
1645
1646
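# Prefer the index's has_node() when available and fall back to the nodemap
# for older changelog implementations.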
1646 has_node = getattr(repo.changelog.index, 'has_node', None)
1647 has_node = getattr(repo.changelog.index, 'has_node', None)
1647 if has_node is None:
1648 if has_node is None:
1648 has_node = repo.changelog.nodemap.__contains__
1649 has_node = repo.changelog.nodemap.__contains__
1649 nonpublishroots = 0
1650 nonpublishroots = 0
1650 for nhex, phase in remotephases.iteritems():
1651 for nhex, phase in remotephases.iteritems():
1651 if nhex == b'publishing': # ignore data related to publish option
1652 if nhex == b'publishing': # ignore data related to publish option
1652 continue
1653 continue
1653 node = bin(nhex)
1654 node = bin(nhex)
1654 if has_node(node) and int(phase):
1655 if has_node(node) and int(phase):
1655 nonpublishroots += 1
1656 nonpublishroots += 1
1656 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1657 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1657 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1658 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1658
1659
1659 def d():
1660 def d():
1660 phases.remotephasessummary(repo, remotesubset, remotephases)
1661 phases.remotephasessummary(repo, remotesubset, remotephases)
1661
1662
1662 timer(d)
1663 timer(d)
1663 fm.end()
1664 fm.end()
1664
1665
1665
1666
1666 @command(
1667 @command(
1667 b'perf::manifest|perfmanifest',
1668 b'perf::manifest|perfmanifest',
1668 [
1669 [
1669 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1670 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1670 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1671 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1671 ]
1672 ]
1672 + formatteropts,
1673 + formatteropts,
1673 b'REV|NODE',
1674 b'REV|NODE',
1674 )
1675 )
1675 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1676 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1676 """benchmark the time to read a manifest from disk and return a usable
1677 """benchmark the time to read a manifest from disk and return a usable
1677 dict-like object
1678 dict-like object
1678
1679
1679 Manifest caches are cleared before retrieval."""
1680 Manifest caches are cleared before retrieval."""
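# Hedged usage sketch (illustrative arguments):
#
# hg perf::manifest tip # look REV up as a changeset
# hg perf::manifest -m 100 # treat REV as a manifest revlog revision
# hg perf::manifest --clear-disk tip # also drop on-disk manifest caches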
1680 opts = _byteskwargs(opts)
1681 opts = _byteskwargs(opts)
1681 timer, fm = gettimer(ui, opts)
1682 timer, fm = gettimer(ui, opts)
1682 if not manifest_rev:
1683 if not manifest_rev:
1683 ctx = scmutil.revsingle(repo, rev, rev)
1684 ctx = scmutil.revsingle(repo, rev, rev)
1684 t = ctx.manifestnode()
1685 t = ctx.manifestnode()
1685 else:
1686 else:
1686 from mercurial.node import bin
1687 from mercurial.node import bin
1687
1688
1688 if len(rev) == 40:
1689 if len(rev) == 40:
1689 t = bin(rev)
1690 t = bin(rev)
1690 else:
1691 else:
1691 try:
1692 try:
1692 rev = int(rev)
1693 rev = int(rev)
1693
1694
1694 if util.safehasattr(repo.manifestlog, b'getstorage'):
1695 if util.safehasattr(repo.manifestlog, b'getstorage'):
1695 t = repo.manifestlog.getstorage(b'').node(rev)
1696 t = repo.manifestlog.getstorage(b'').node(rev)
1696 else:
1697 else:
1697 t = repo.manifestlog._revlog.lookup(rev)
1698 t = repo.manifestlog._revlog.lookup(rev)
1698 except ValueError:
1699 except ValueError:
1699 raise error.Abort(
1700 raise error.Abort(
1700 b'manifest revision must be integer or full node'
1701 b'manifest revision must be integer or full node'
1701 )
1702 )
1702
1703
1703 def d():
1704 def d():
1704 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1705 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1705 repo.manifestlog[t].read()
1706 repo.manifestlog[t].read()
1706
1707
1707 timer(d)
1708 timer(d)
1708 fm.end()
1709 fm.end()
1709
1710
1710
1711
1711 @command(b'perf::changeset|perfchangeset', formatteropts)
1712 @command(b'perf::changeset|perfchangeset', formatteropts)
1712 def perfchangeset(ui, repo, rev, **opts):
1713 def perfchangeset(ui, repo, rev, **opts):
1713 opts = _byteskwargs(opts)
1714 opts = _byteskwargs(opts)
1714 timer, fm = gettimer(ui, opts)
1715 timer, fm = gettimer(ui, opts)
1715 n = scmutil.revsingle(repo, rev).node()
1716 n = scmutil.revsingle(repo, rev).node()
1716
1717
1717 def d():
1718 def d():
1718 repo.changelog.read(n)
1719 repo.changelog.read(n)
1719 # repo.changelog._cache = None
1720 # repo.changelog._cache = None
1720
1721
1721 timer(d)
1722 timer(d)
1722 fm.end()
1723 fm.end()
1723
1724
1724
1725
1725 @command(b'perf::ignore|perfignore', formatteropts)
1726 @command(b'perf::ignore|perfignore', formatteropts)
1726 def perfignore(ui, repo, **opts):
1727 def perfignore(ui, repo, **opts):
1727 """benchmark operations related to computing ignore rules"""
1728 """benchmark operations related to computing ignore rules"""
1728 opts = _byteskwargs(opts)
1729 opts = _byteskwargs(opts)
1729 timer, fm = gettimer(ui, opts)
1730 timer, fm = gettimer(ui, opts)
1730 dirstate = repo.dirstate
1731 dirstate = repo.dirstate
1731
1732
1732 def setupone():
1733 def setupone():
1733 dirstate.invalidate()
1734 dirstate.invalidate()
1734 clearfilecache(dirstate, b'_ignore')
1735 clearfilecache(dirstate, b'_ignore')
1735
1736
1736 def runone():
1737 def runone():
1737 dirstate._ignore
1738 dirstate._ignore
1738
1739
1739 timer(runone, setup=setupone, title=b"load")
1740 timer(runone, setup=setupone, title=b"load")
1740 fm.end()
1741 fm.end()
1741
1742
1742
1743
1743 @command(
1744 @command(
1744 b'perf::index|perfindex',
1745 b'perf::index|perfindex',
1745 [
1746 [
1746 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1747 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1747 (b'', b'no-lookup', None, b'do not perform revision lookup after creation'),
1748 (b'', b'no-lookup', None, b'do not perform revision lookup after creation'),
1748 ]
1749 ]
1749 + formatteropts,
1750 + formatteropts,
1750 )
1751 )
1751 def perfindex(ui, repo, **opts):
1752 def perfindex(ui, repo, **opts):
1752 """benchmark index creation time followed by a lookup
1753 """benchmark index creation time followed by a lookup
1753
1754
1754 The default is to look `tip` up. Depending on the index implementation,
1755 The default is to look `tip` up. Depending on the index implementation,
1755 the revision looked up can matter. For example, an implementation
1756 the revision looked up can matter. For example, an implementation
1756 scanning the index will have a faster lookup time for `--rev tip` than for
1757 scanning the index will have a faster lookup time for `--rev tip` than for
1757 `--rev 0`. The number of looked up revisions and their order can also
1758 `--rev 0`. The number of looked up revisions and their order can also
1758 matter.
1759 matter.
1759
1760
1760 Examples of useful sets to test:
1761 Examples of useful sets to test:
1761
1762
1762 * tip
1763 * tip
1763 * 0
1764 * 0
1764 * -10:
1765 * -10:
1765 * :10
1766 * :10
1766 * -10: + :10
1767 * -10: + :10
1767 * :10: + -10:
1768 * :10: + -10:
1768 * -10000:
1769 * -10000:
1769 * -10000: + 0
1770 * -10000: + 0
1770
1771
1771 It is not currently possible to check for lookup of a missing node. For
1772 It is not currently possible to check for lookup of a missing node. For
1772 deeper lookup benchmarking, check out the `perfnodemap` command."""
1773 deeper lookup benchmarking, check out the `perfnodemap` command."""
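# For instance, `hg perf::index --rev '-10000:' --rev 0` (an illustrative
# invocation) exercises a large recent lookup set followed by revision 0.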
1773 import mercurial.revlog
1774 import mercurial.revlog
1774
1775
1775 opts = _byteskwargs(opts)
1776 opts = _byteskwargs(opts)
1776 timer, fm = gettimer(ui, opts)
1777 timer, fm = gettimer(ui, opts)
1777 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1778 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1778 if opts[b'no_lookup']:
1779 if opts[b'no_lookup']:
1779 if opts['rev']:
1780 if opts['rev']:
1780 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1781 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1781 nodes = []
1782 nodes = []
1782 elif not opts[b'rev']:
1783 elif not opts[b'rev']:
1783 nodes = [repo[b"tip"].node()]
1784 nodes = [repo[b"tip"].node()]
1784 else:
1785 else:
1785 revs = scmutil.revrange(repo, opts[b'rev'])
1786 revs = scmutil.revrange(repo, opts[b'rev'])
1786 cl = repo.changelog
1787 cl = repo.changelog
1787 nodes = [cl.node(r) for r in revs]
1788 nodes = [cl.node(r) for r in revs]
1788
1789
1789 unfi = repo.unfiltered()
1790 unfi = repo.unfiltered()
1790 # find the filecache func directly
1791 # find the filecache func directly
1791 # This avoids polluting the benchmark with the filecache logic
1792 # This avoids polluting the benchmark with the filecache logic
1792 makecl = unfi.__class__.changelog.func
1793 makecl = unfi.__class__.changelog.func
1793
1794
1794 def setup():
1795 def setup():
1795 # probably not necessary, but for good measure
1796 # probably not necessary, but for good measure
1796 clearchangelog(unfi)
1797 clearchangelog(unfi)
1797
1798
1798 def d():
1799 def d():
1799 cl = makecl(unfi)
1800 cl = makecl(unfi)
1800 for n in nodes:
1801 for n in nodes:
1801 cl.rev(n)
1802 cl.rev(n)
1802
1803
1803 timer(d, setup=setup)
1804 timer(d, setup=setup)
1804 fm.end()
1805 fm.end()
1805
1806
1806
1807
1807 @command(
1808 @command(
1808 b'perf::nodemap|perfnodemap',
1809 b'perf::nodemap|perfnodemap',
1809 [
1810 [
1810 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1811 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1811 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1812 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1812 ]
1813 ]
1813 + formatteropts,
1814 + formatteropts,
1814 )
1815 )
1815 def perfnodemap(ui, repo, **opts):
1816 def perfnodemap(ui, repo, **opts):
1816 """benchmark the time necessary to look up revisions from a cold nodemap
1817 """benchmark the time necessary to look up revisions from a cold nodemap
1817
1818
1818 Depending on the implementation, the number and order of revisions we look
1819 Depending on the implementation, the number and order of revisions we look
1819 up can vary. Examples of useful sets to test:
1820 up can vary. Examples of useful sets to test:
1820 * tip
1821 * tip
1821 * 0
1822 * 0
1822 * -10:
1823 * -10:
1823 * :10
1824 * :10
1824 * -10: + :10
1825 * -10: + :10
1825 * :10: + -10:
1826 * :10: + -10:
1826 * -10000:
1827 * -10000:
1827 * -10000: + 0
1828 * -10000: + 0
1828
1829
1829 The command currently focuses on valid binary lookup. Benchmarking for
1830 The command currently focuses on valid binary lookup. Benchmarking for
1830 hexlookup, prefix lookup and missing lookup would also be valuable.
1831 hexlookup, prefix lookup and missing lookup would also be valuable.
1831 """
1832 """
1832 import mercurial.revlog
1833 import mercurial.revlog
1833
1834
1834 opts = _byteskwargs(opts)
1835 opts = _byteskwargs(opts)
1835 timer, fm = gettimer(ui, opts)
1836 timer, fm = gettimer(ui, opts)
1836 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1837 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1837
1838
1838 unfi = repo.unfiltered()
1839 unfi = repo.unfiltered()
1839 clearcaches = opts[b'clear_caches']
1840 clearcaches = opts[b'clear_caches']
1840 # find the filecache func directly
1841 # find the filecache func directly
1841 # This avoids polluting the benchmark with the filecache logic
1842 # This avoids polluting the benchmark with the filecache logic
1842 makecl = unfi.__class__.changelog.func
1843 makecl = unfi.__class__.changelog.func
1843 if not opts[b'rev']:
1844 if not opts[b'rev']:
1844 raise error.Abort(b'use --rev to specify revisions to look up')
1845 raise error.Abort(b'use --rev to specify revisions to look up')
1845 revs = scmutil.revrange(repo, opts[b'rev'])
1846 revs = scmutil.revrange(repo, opts[b'rev'])
1846 cl = repo.changelog
1847 cl = repo.changelog
1847 nodes = [cl.node(r) for r in revs]
1848 nodes = [cl.node(r) for r in revs]
1848
1849
1849 # use a list to pass reference to a nodemap from one closure to the next
1850 # use a list to pass reference to a nodemap from one closure to the next
1850 nodeget = [None]
1851 nodeget = [None]
1851
1852
1852 def setnodeget():
1853 def setnodeget():
1853 # probably not necessary, but for good measure
1854 # probably not necessary, but for good measure
1854 clearchangelog(unfi)
1855 clearchangelog(unfi)
1855 cl = makecl(unfi)
1856 cl = makecl(unfi)
1856 if util.safehasattr(cl.index, 'get_rev'):
1857 if util.safehasattr(cl.index, 'get_rev'):
1857 nodeget[0] = cl.index.get_rev
1858 nodeget[0] = cl.index.get_rev
1858 else:
1859 else:
1859 nodeget[0] = cl.nodemap.get
1860 nodeget[0] = cl.nodemap.get
1860
1861
1861 def d():
1862 def d():
1862 get = nodeget[0]
1863 get = nodeget[0]
1863 for n in nodes:
1864 for n in nodes:
1864 get(n)
1865 get(n)
1865
1866
1866 setup = None
1867 setup = None
1867 if clearcaches:
1868 if clearcaches:
1868
1869
1869 def setup():
1870 def setup():
1870 setnodeget()
1871 setnodeget()
1871
1872
1872 else:
1873 else:
1873 setnodeget()
1874 setnodeget()
1874 d() # prewarm the data structure
1875 d() # prewarm the data structure
1875 timer(d, setup=setup)
1876 timer(d, setup=setup)
1876 fm.end()
1877 fm.end()
1877
1878
1878
1879
1879 @command(b'perf::startup|perfstartup', formatteropts)
1880 @command(b'perf::startup|perfstartup', formatteropts)
1880 def perfstartup(ui, repo, **opts):
1881 def perfstartup(ui, repo, **opts):
1881 opts = _byteskwargs(opts)
1882 opts = _byteskwargs(opts)
1882 timer, fm = gettimer(ui, opts)
1883 timer, fm = gettimer(ui, opts)
1883
1884
1884 def d():
1885 def d():
1885 if os.name != 'nt':
1886 if os.name != 'nt':
1886 os.system(
1887 os.system(
1887 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1888 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1888 )
1889 )
1889 else:
1890 else:
1890 os.environ['HGRCPATH'] = r' '
1891 os.environ['HGRCPATH'] = r' '
1891 os.system("%s version -q > NUL" % sys.argv[0])
1892 os.system("%s version -q > NUL" % sys.argv[0])
1892
1893
1893 timer(d)
1894 timer(d)
1894 fm.end()
1895 fm.end()
1895
1896
1896
1897
1897 @command(b'perf::parents|perfparents', formatteropts)
1898 @command(b'perf::parents|perfparents', formatteropts)
1898 def perfparents(ui, repo, **opts):
1899 def perfparents(ui, repo, **opts):
1899 """benchmark the time necessary to fetch one changeset's parents.
1900 """benchmark the time necessary to fetch one changeset's parents.
1900
1901
1901 The fetch is done using the `node identifier`, traversing all object layers
1902 The fetch is done using the `node identifier`, traversing all object layers
1902 from the repository object. The first N revisions will be used for this
1903 from the repository object. The first N revisions will be used for this
1903 benchmark. N is controlled by the ``perf.parentscount`` config option
1904 benchmark. N is controlled by the ``perf.parentscount`` config option
1904 (default: 1000).
1905 (default: 1000).
1905 """
1906 """
1906 opts = _byteskwargs(opts)
1907 opts = _byteskwargs(opts)
1907 timer, fm = gettimer(ui, opts)
1908 timer, fm = gettimer(ui, opts)
1908 # control the number of commits perfparents iterates over
1909 # control the number of commits perfparents iterates over
1909 # experimental config: perf.parentscount
1910 # experimental config: perf.parentscount
1910 count = getint(ui, b"perf", b"parentscount", 1000)
1911 count = getint(ui, b"perf", b"parentscount", 1000)
1911 if len(repo.changelog) < count:
1912 if len(repo.changelog) < count:
1912 raise error.Abort(b"repo needs %d commits for this test" % count)
1913 raise error.Abort(b"repo needs %d commits for this test" % count)
1913 repo = repo.unfiltered()
1914 repo = repo.unfiltered()
1914 nl = [repo.changelog.node(i) for i in _xrange(count)]
1915 nl = [repo.changelog.node(i) for i in _xrange(count)]
1915
1916
1916 def d():
1917 def d():
1917 for n in nl:
1918 for n in nl:
1918 repo.changelog.parents(n)
1919 repo.changelog.parents(n)
1919
1920
1920 timer(d)
1921 timer(d)
1921 fm.end()
1922 fm.end()
1922
1923
1923
1924
1924 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1925 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1925 def perfctxfiles(ui, repo, x, **opts):
1926 def perfctxfiles(ui, repo, x, **opts):
1926 opts = _byteskwargs(opts)
1927 opts = _byteskwargs(opts)
1927 x = int(x)
1928 x = int(x)
1928 timer, fm = gettimer(ui, opts)
1929 timer, fm = gettimer(ui, opts)
1929
1930
1930 def d():
1931 def d():
1931 len(repo[x].files())
1932 len(repo[x].files())
1932
1933
1933 timer(d)
1934 timer(d)
1934 fm.end()
1935 fm.end()
1935
1936
1936
1937
1937 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1938 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1938 def perfrawfiles(ui, repo, x, **opts):
1939 def perfrawfiles(ui, repo, x, **opts):
1939 opts = _byteskwargs(opts)
1940 opts = _byteskwargs(opts)
1940 x = int(x)
1941 x = int(x)
1941 timer, fm = gettimer(ui, opts)
1942 timer, fm = gettimer(ui, opts)
1942 cl = repo.changelog
1943 cl = repo.changelog
1943
1944
1944 def d():
1945 def d():
1945 len(cl.read(x)[3])
1946 len(cl.read(x)[3])
1946
1947
1947 timer(d)
1948 timer(d)
1948 fm.end()
1949 fm.end()
1949
1950
1950
1951
1951 @command(b'perf::lookup|perflookup', formatteropts)
1952 @command(b'perf::lookup|perflookup', formatteropts)
1952 def perflookup(ui, repo, rev, **opts):
1953 def perflookup(ui, repo, rev, **opts):
1953 opts = _byteskwargs(opts)
1954 opts = _byteskwargs(opts)
1954 timer, fm = gettimer(ui, opts)
1955 timer, fm = gettimer(ui, opts)
1955 timer(lambda: len(repo.lookup(rev)))
1956 timer(lambda: len(repo.lookup(rev)))
1956 fm.end()
1957 fm.end()
1957
1958
1958
1959
1959 @command(
1960 @command(
1960 b'perf::linelogedits|perflinelogedits',
1961 b'perf::linelogedits|perflinelogedits',
1961 [
1962 [
1962 (b'n', b'edits', 10000, b'number of edits'),
1963 (b'n', b'edits', 10000, b'number of edits'),
1963 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1964 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1964 ],
1965 ],
1965 norepo=True,
1966 norepo=True,
1966 )
1967 )
1967 def perflinelogedits(ui, **opts):
1968 def perflinelogedits(ui, **opts):
1968 from mercurial import linelog
1969 from mercurial import linelog
1969
1970
1970 opts = _byteskwargs(opts)
1971 opts = _byteskwargs(opts)
1971
1972
1972 edits = opts[b'edits']
1973 edits = opts[b'edits']
1973 maxhunklines = opts[b'max_hunk_lines']
1974 maxhunklines = opts[b'max_hunk_lines']
1974
1975
1975 maxb1 = 100000
1976 maxb1 = 100000
1976 random.seed(0)
1977 random.seed(0)
1977 randint = random.randint
1978 randint = random.randint
1978 currentlines = 0
1979 currentlines = 0
1979 arglist = []
1980 arglist = []
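# Each synthetic edit replaces a random existing line range (a1, a2) with a
# random new range (b1, b2); `currentlines` tracks the net size change so the
# next edit keeps addressing lines that exist.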
1980 for rev in _xrange(edits):
1981 for rev in _xrange(edits):
1981 a1 = randint(0, currentlines)
1982 a1 = randint(0, currentlines)
1982 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1983 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1983 b1 = randint(0, maxb1)
1984 b1 = randint(0, maxb1)
1984 b2 = randint(b1, b1 + maxhunklines)
1985 b2 = randint(b1, b1 + maxhunklines)
1985 currentlines += (b2 - b1) - (a2 - a1)
1986 currentlines += (b2 - b1) - (a2 - a1)
1986 arglist.append((rev, a1, a2, b1, b2))
1987 arglist.append((rev, a1, a2, b1, b2))
1987
1988
1988 def d():
1989 def d():
1989 ll = linelog.linelog()
1990 ll = linelog.linelog()
1990 for args in arglist:
1991 for args in arglist:
1991 ll.replacelines(*args)
1992 ll.replacelines(*args)
1992
1993
1993 timer, fm = gettimer(ui, opts)
1994 timer, fm = gettimer(ui, opts)
1994 timer(d)
1995 timer(d)
1995 fm.end()
1996 fm.end()
1996
1997
1997
1998
1998 @command(b'perf::revrange|perfrevrange', formatteropts)
1999 @command(b'perf::revrange|perfrevrange', formatteropts)
1999 def perfrevrange(ui, repo, *specs, **opts):
2000 def perfrevrange(ui, repo, *specs, **opts):
2000 opts = _byteskwargs(opts)
2001 opts = _byteskwargs(opts)
2001 timer, fm = gettimer(ui, opts)
2002 timer, fm = gettimer(ui, opts)
2002 revrange = scmutil.revrange
2003 revrange = scmutil.revrange
2003 timer(lambda: len(revrange(repo, specs)))
2004 timer(lambda: len(revrange(repo, specs)))
2004 fm.end()
2005 fm.end()
2005
2006
2006
2007
2007 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2008 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2008 def perfnodelookup(ui, repo, rev, **opts):
2009 def perfnodelookup(ui, repo, rev, **opts):
2009 opts = _byteskwargs(opts)
2010 opts = _byteskwargs(opts)
2010 timer, fm = gettimer(ui, opts)
2011 timer, fm = gettimer(ui, opts)
2011 import mercurial.revlog
2012 import mercurial.revlog
2012
2013
2013 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2014 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2014 n = scmutil.revsingle(repo, rev).node()
2015 n = scmutil.revsingle(repo, rev).node()
2015
2016
2016 try:
2017 try:
2017 cl = revlog(getsvfs(repo), radix=b"00changelog")
2018 cl = revlog(getsvfs(repo), radix=b"00changelog")
2018 except TypeError:
2019 except TypeError:
2019 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2020 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2020
2021
2021 def d():
2022 def d():
2022 cl.rev(n)
2023 cl.rev(n)
2023 clearcaches(cl)
2024 clearcaches(cl)
2024
2025
2025 timer(d)
2026 timer(d)
2026 fm.end()
2027 fm.end()
2027
2028
2028
2029
2029 @command(
2030 @command(
2030 b'perf::log|perflog',
2031 b'perf::log|perflog',
2031 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2032 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2032 )
2033 )
2033 def perflog(ui, repo, rev=None, **opts):
2034 def perflog(ui, repo, rev=None, **opts):
2034 opts = _byteskwargs(opts)
2035 opts = _byteskwargs(opts)
2035 if rev is None:
2036 if rev is None:
2036 rev = []
2037 rev = []
2037 timer, fm = gettimer(ui, opts)
2038 timer, fm = gettimer(ui, opts)
2038 ui.pushbuffer()
2039 ui.pushbuffer()
2039 timer(
2040 timer(
2040 lambda: commands.log(
2041 lambda: commands.log(
2041 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2042 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2042 )
2043 )
2043 )
2044 )
2044 ui.popbuffer()
2045 ui.popbuffer()
2045 fm.end()
2046 fm.end()
2046
2047
2047
2048
2048 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2049 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2049 def perfmoonwalk(ui, repo, **opts):
2050 def perfmoonwalk(ui, repo, **opts):
2050 """benchmark walking the changelog backwards
2051 """benchmark walking the changelog backwards
2051
2052
2052 This also loads the changelog data for each revision in the changelog.
2053 This also loads the changelog data for each revision in the changelog.
2053 """
2054 """
2054 opts = _byteskwargs(opts)
2055 opts = _byteskwargs(opts)
2055 timer, fm = gettimer(ui, opts)
2056 timer, fm = gettimer(ui, opts)
2056
2057
2057 def moonwalk():
2058 def moonwalk():
2058 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2059 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2059 ctx = repo[i]
2060 ctx = repo[i]
2060 ctx.branch() # read changelog data (in addition to the index)
2061 ctx.branch() # read changelog data (in addition to the index)
2061
2062
2062 timer(moonwalk)
2063 timer(moonwalk)
2063 fm.end()
2064 fm.end()
2064
2065
2065
2066
2066 @command(
2067 @command(
2067 b'perf::templating|perftemplating',
2068 b'perf::templating|perftemplating',
2068 [
2069 [
2069 (b'r', b'rev', [], b'revisions to run the template on'),
2070 (b'r', b'rev', [], b'revisions to run the template on'),
2070 ]
2071 ]
2071 + formatteropts,
2072 + formatteropts,
2072 )
2073 )
2073 def perftemplating(ui, repo, testedtemplate=None, **opts):
2074 def perftemplating(ui, repo, testedtemplate=None, **opts):
2074 """test the rendering time of a given template"""
2075 """test the rendering time of a given template"""
2075 if makelogtemplater is None:
2076 if makelogtemplater is None:
2076 raise error.Abort(
2077 raise error.Abort(
2077 b"perftemplating not available with this Mercurial",
2078 b"perftemplating not available with this Mercurial",
2078 hint=b"use 4.3 or later",
2079 hint=b"use 4.3 or later",
2079 )
2080 )
2080
2081
2081 opts = _byteskwargs(opts)
2082 opts = _byteskwargs(opts)
2082
2083
2083 nullui = ui.copy()
2084 nullui = ui.copy()
2084 nullui.fout = open(os.devnull, 'wb')
2085 nullui.fout = open(os.devnull, 'wb')
2085 nullui.disablepager()
2086 nullui.disablepager()
2086 revs = opts.get(b'rev')
2087 revs = opts.get(b'rev')
2087 if not revs:
2088 if not revs:
2088 revs = [b'all()']
2089 revs = [b'all()']
2089 revs = list(scmutil.revrange(repo, revs))
2090 revs = list(scmutil.revrange(repo, revs))
2090
2091
2091 defaulttemplate = (
2092 defaulttemplate = (
2092 b'{date|shortdate} [{rev}:{node|short}]'
2093 b'{date|shortdate} [{rev}:{node|short}]'
2093 b' {author|person}: {desc|firstline}\n'
2094 b' {author|person}: {desc|firstline}\n'
2094 )
2095 )
2095 if testedtemplate is None:
2096 if testedtemplate is None:
2096 testedtemplate = defaulttemplate
2097 testedtemplate = defaulttemplate
2097 displayer = makelogtemplater(nullui, repo, testedtemplate)
2098 displayer = makelogtemplater(nullui, repo, testedtemplate)
2098
2099
2099 def format():
2100 def format():
2100 for r in revs:
2101 for r in revs:
2101 ctx = repo[r]
2102 ctx = repo[r]
2102 displayer.show(ctx)
2103 displayer.show(ctx)
2103 displayer.flush(ctx)
2104 displayer.flush(ctx)
2104
2105
2105 timer, fm = gettimer(ui, opts)
2106 timer, fm = gettimer(ui, opts)
2106 timer(format)
2107 timer(format)
2107 fm.end()
2108 fm.end()
2108
2109
2109
2110
2110 def _displaystats(ui, opts, entries, data):
2111 def _displaystats(ui, opts, entries, data):
2111 # use a second formatter because the data are quite different, not sure
2112 # use a second formatter because the data are quite different, not sure
2112 # how it flies with the templater.
2113 # how it flies with the templater.
2113 fm = ui.formatter(b'perf-stats', opts)
2114 fm = ui.formatter(b'perf-stats', opts)
2114 for key, title in entries:
2115 for key, title in entries:
2115 values = data[key]
2116 values = data[key]
2116 nbvalues = len(values)
2117 nbvalues = len(values)
2117 values.sort()
2118 values.sort()
2118 stats = {
2119 stats = {
2119 'key': key,
2120 'key': key,
2120 'title': title,
2121 'title': title,
2121 'nbitems': len(values),
2122 'nbitems': len(values),
2122 'min': values[0][0],
2123 'min': values[0][0],
2123 '10%': values[(nbvalues * 10) // 100][0],
2124 '10%': values[(nbvalues * 10) // 100][0],
2124 '25%': values[(nbvalues * 25) // 100][0],
2125 '25%': values[(nbvalues * 25) // 100][0],
2125 '50%': values[(nbvalues * 50) // 100][0],
2126 '50%': values[(nbvalues * 50) // 100][0],
2126 '75%': values[(nbvalues * 75) // 100][0],
2127 '75%': values[(nbvalues * 75) // 100][0],
2127 '80%': values[(nbvalues * 80) // 100][0],
2128 '80%': values[(nbvalues * 80) // 100][0],
2128 '85%': values[(nbvalues * 85) // 100][0],
2129 '85%': values[(nbvalues * 85) // 100][0],
2129 '90%': values[(nbvalues * 90) // 100][0],
2130 '90%': values[(nbvalues * 90) // 100][0],
2130 '95%': values[(nbvalues * 95) // 100][0],
2131 '95%': values[(nbvalues * 95) // 100][0],
2131 '99%': values[(nbvalues * 99) // 100][0],
2132 '99%': values[(nbvalues * 99) // 100][0],
2132 'max': values[-1][0],
2133 'max': values[-1][0],
2133 }
2134 }
2134 fm.startitem()
2135 fm.startitem()
2135 fm.data(**stats)
2136 fm.data(**stats)
2136 # make node pretty for the human output
2137 # make node pretty for the human output
2137 fm.plain('### %s (%d items)\n' % (title, len(values)))
2138 fm.plain('### %s (%d items)\n' % (title, len(values)))
2138 lines = [
2139 lines = [
2139 'min',
2140 'min',
2140 '10%',
2141 '10%',
2141 '25%',
2142 '25%',
2142 '50%',
2143 '50%',
2143 '75%',
2144 '75%',
2144 '80%',
2145 '80%',
2145 '85%',
2146 '85%',
2146 '90%',
2147 '90%',
2147 '95%',
2148 '95%',
2148 '99%',
2149 '99%',
2149 'max',
2150 'max',
2150 ]
2151 ]
2151 for l in lines:
2152 for l in lines:
2152 fm.plain('%s: %s\n' % (l, stats[l]))
2153 fm.plain('%s: %s\n' % (l, stats[l]))
2153 fm.end()
2154 fm.end()
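A minimal sketch of the percentile lookup performed by `_displaystats` above, assuming a plain sorted list of `(value, ...)` tuples; the numbers are invented for illustration and are not part of perf.py:

values = sorted([(12,), (3,), (42,), (7,)])
nbvalues = len(values)
p50 = values[(nbvalues * 50) // 100][0]  # index 2 -> 12
p90 = values[(nbvalues * 90) // 100][0]  # index 3 -> 42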
2154
2155
2155
2156
2156 @command(
2157 @command(
2157 b'perf::helper-mergecopies|perfhelper-mergecopies',
2158 b'perf::helper-mergecopies|perfhelper-mergecopies',
2158 formatteropts
2159 formatteropts
2159 + [
2160 + [
2160 (b'r', b'revs', [], b'restrict search to these revisions'),
2161 (b'r', b'revs', [], b'restrict search to these revisions'),
2161 (b'', b'timing', False, b'provides extra data (costly)'),
2162 (b'', b'timing', False, b'provides extra data (costly)'),
2162 (b'', b'stats', False, b'provides statistic about the measured data'),
2163 (b'', b'stats', False, b'provides statistic about the measured data'),
2163 ],
2164 ],
2164 )
2165 )
2165 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2166 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2166 """find statistics about potential parameters for `perfmergecopies`
2167 """find statistics about potential parameters for `perfmergecopies`
2167
2168
2168 This command finds (base, p1, p2) triplets relevant for copytracing
2169 This command finds (base, p1, p2) triplets relevant for copytracing
2169 benchmarking in the context of a merge. It reports values for some of the
2170 benchmarking in the context of a merge. It reports values for some of the
2170 parameters that impact merge copy tracing time during merge.
2171 parameters that impact merge copy tracing time during merge.
2171
2172
2172 If `--timing` is set, rename detection is run and the associated timing
2173 If `--timing` is set, rename detection is run and the associated timing
2173 will be reported. The extra details come at the cost of slower command
2174 will be reported. The extra details come at the cost of slower command
2174 execution.
2175 execution.
2175
2176
2176 Since rename detection is only run once, other factors might easily
2177 Since rename detection is only run once, other factors might easily
2177 affect the precision of the timing. However it should give a good
2178 affect the precision of the timing. However it should give a good
2178 approximation of which revision triplets are very costly.
2179 approximation of which revision triplets are very costly.
2179 """
2180 """
2180 opts = _byteskwargs(opts)
2181 opts = _byteskwargs(opts)
2181 fm = ui.formatter(b'perf', opts)
2182 fm = ui.formatter(b'perf', opts)
2182 dotiming = opts[b'timing']
2183 dotiming = opts[b'timing']
2183 dostats = opts[b'stats']
2184 dostats = opts[b'stats']
2184
2185
2185 output_template = [
2186 output_template = [
2186 ("base", "%(base)12s"),
2187 ("base", "%(base)12s"),
2187 ("p1", "%(p1.node)12s"),
2188 ("p1", "%(p1.node)12s"),
2188 ("p2", "%(p2.node)12s"),
2189 ("p2", "%(p2.node)12s"),
2189 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2190 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2190 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2191 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2191 ("p1.renames", "%(p1.renamedfiles)12d"),
2192 ("p1.renames", "%(p1.renamedfiles)12d"),
2192 ("p1.time", "%(p1.time)12.3f"),
2193 ("p1.time", "%(p1.time)12.3f"),
2193 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2194 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2194 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2195 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2195 ("p2.renames", "%(p2.renamedfiles)12d"),
2196 ("p2.renames", "%(p2.renamedfiles)12d"),
2196 ("p2.time", "%(p2.time)12.3f"),
2197 ("p2.time", "%(p2.time)12.3f"),
2197 ("renames", "%(nbrenamedfiles)12d"),
2198 ("renames", "%(nbrenamedfiles)12d"),
2198 ("total.time", "%(time)12.3f"),
2199 ("total.time", "%(time)12.3f"),
2199 ]
2200 ]
2200 if not dotiming:
2201 if not dotiming:
2201 output_template = [
2202 output_template = [
2202 i
2203 i
2203 for i in output_template
2204 for i in output_template
2204 if not ('time' in i[0] or 'renames' in i[0])
2205 if not ('time' in i[0] or 'renames' in i[0])
2205 ]
2206 ]
2206 header_names = [h for (h, v) in output_template]
2207 header_names = [h for (h, v) in output_template]
2207 output = ' '.join([v for (h, v) in output_template]) + '\n'
2208 output = ' '.join([v for (h, v) in output_template]) + '\n'
2208 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2209 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2209 fm.plain(header % tuple(header_names))
2210 fm.plain(header % tuple(header_names))
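The column layout built just above relies on mapping-key %-formatting with fixed 12-character fields; a standalone illustration with invented values (not part of perf.py):

row = "%(base)12s %(p1.time)12.3f"
print(row % {'base': 'deadbeef0000', 'p1.time': 0.25})
# -> deadbeef0000        0.250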
2210
2211
2211 if not revs:
2212 if not revs:
2212 revs = ['all()']
2213 revs = ['all()']
2213 revs = scmutil.revrange(repo, revs)
2214 revs = scmutil.revrange(repo, revs)
2214
2215
2215 if dostats:
2216 if dostats:
2216 alldata = {
2217 alldata = {
2217 'nbrevs': [],
2218 'nbrevs': [],
2218 'nbmissingfiles': [],
2219 'nbmissingfiles': [],
2219 }
2220 }
2220 if dotiming:
2221 if dotiming:
2221 alldata['parentnbrenames'] = []
2222 alldata['parentnbrenames'] = []
2222 alldata['totalnbrenames'] = []
2223 alldata['totalnbrenames'] = []
2223 alldata['parenttime'] = []
2224 alldata['parenttime'] = []
2224 alldata['totaltime'] = []
2225 alldata['totaltime'] = []
2225
2226
2226 roi = repo.revs('merge() and %ld', revs)
2227 roi = repo.revs('merge() and %ld', revs)
2227 for r in roi:
2228 for r in roi:
2228 ctx = repo[r]
2229 ctx = repo[r]
2229 p1 = ctx.p1()
2230 p1 = ctx.p1()
2230 p2 = ctx.p2()
2231 p2 = ctx.p2()
2231 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2232 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2232 for b in bases:
2233 for b in bases:
2233 b = repo[b]
2234 b = repo[b]
2234 p1missing = copies._computeforwardmissing(b, p1)
2235 p1missing = copies._computeforwardmissing(b, p1)
2235 p2missing = copies._computeforwardmissing(b, p2)
2236 p2missing = copies._computeforwardmissing(b, p2)
2236 data = {
2237 data = {
2237 b'base': b.hex(),
2238 b'base': b.hex(),
2238 b'p1.node': p1.hex(),
2239 b'p1.node': p1.hex(),
2239 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2240 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2240 b'p1.nbmissingfiles': len(p1missing),
2241 b'p1.nbmissingfiles': len(p1missing),
2241 b'p2.node': p2.hex(),
2242 b'p2.node': p2.hex(),
2242 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2243 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2243 b'p2.nbmissingfiles': len(p2missing),
2244 b'p2.nbmissingfiles': len(p2missing),
2244 }
2245 }
2245 if dostats:
2246 if dostats:
2246 if p1missing:
2247 if p1missing:
2247 alldata['nbrevs'].append(
2248 alldata['nbrevs'].append(
2248 (data['p1.nbrevs'], b.hex(), p1.hex())
2249 (data['p1.nbrevs'], b.hex(), p1.hex())
2249 )
2250 )
2250 alldata['nbmissingfiles'].append(
2251 alldata['nbmissingfiles'].append(
2251 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2252 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2252 )
2253 )
2253 if p2missing:
2254 if p2missing:
2254 alldata['nbrevs'].append(
2255 alldata['nbrevs'].append(
2255 (data['p2.nbrevs'], b.hex(), p2.hex())
2256 (data['p2.nbrevs'], b.hex(), p2.hex())
2256 )
2257 )
2257 alldata['nbmissingfiles'].append(
2258 alldata['nbmissingfiles'].append(
2258 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2259 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2259 )
2260 )
2260 if dotiming:
2261 if dotiming:
2261 begin = util.timer()
2262 begin = util.timer()
2262 mergedata = copies.mergecopies(repo, p1, p2, b)
2263 mergedata = copies.mergecopies(repo, p1, p2, b)
2263 end = util.timer()
2264 end = util.timer()
2264 # not very stable timing since we did only one run
2265 # not very stable timing since we did only one run
2265 data['time'] = end - begin
2266 data['time'] = end - begin
2266 # mergedata contains five dicts: "copy", "movewithdir",
2267 # mergedata contains five dicts: "copy", "movewithdir",
2267 # "diverge", "renamedelete" and "dirmove".
2268 # "diverge", "renamedelete" and "dirmove".
2268 # The first 4 are about renamed files, so let's count those.
2269 # The first 4 are about renamed files, so let's count those.
2269 renames = len(mergedata[0])
2270 renames = len(mergedata[0])
2270 renames += len(mergedata[1])
2271 renames += len(mergedata[1])
2271 renames += len(mergedata[2])
2272 renames += len(mergedata[2])
2272 renames += len(mergedata[3])
2273 renames += len(mergedata[3])
2273 data['nbrenamedfiles'] = renames
2274 data['nbrenamedfiles'] = renames
2274 begin = util.timer()
2275 begin = util.timer()
2275 p1renames = copies.pathcopies(b, p1)
2276 p1renames = copies.pathcopies(b, p1)
2276 end = util.timer()
2277 end = util.timer()
2277 data['p1.time'] = end - begin
2278 data['p1.time'] = end - begin
2278 begin = util.timer()
2279 begin = util.timer()
2279 p2renames = copies.pathcopies(b, p2)
2280 p2renames = copies.pathcopies(b, p2)
2280 end = util.timer()
2281 end = util.timer()
2281 data['p2.time'] = end - begin
2282 data['p2.time'] = end - begin
2282 data['p1.renamedfiles'] = len(p1renames)
2283 data['p1.renamedfiles'] = len(p1renames)
2283 data['p2.renamedfiles'] = len(p2renames)
2284 data['p2.renamedfiles'] = len(p2renames)
2284
2285
2285 if dostats:
2286 if dostats:
2286 if p1missing:
2287 if p1missing:
2287 alldata['parentnbrenames'].append(
2288 alldata['parentnbrenames'].append(
2288 (data['p1.renamedfiles'], b.hex(), p1.hex())
2289 (data['p1.renamedfiles'], b.hex(), p1.hex())
2289 )
2290 )
2290 alldata['parenttime'].append(
2291 alldata['parenttime'].append(
2291 (data['p1.time'], b.hex(), p1.hex())
2292 (data['p1.time'], b.hex(), p1.hex())
2292 )
2293 )
2293 if p2missing:
2294 if p2missing:
2294 alldata['parentnbrenames'].append(
2295 alldata['parentnbrenames'].append(
2295 (data['p2.renamedfiles'], b.hex(), p2.hex())
2296 (data['p2.renamedfiles'], b.hex(), p2.hex())
2296 )
2297 )
2297 alldata['parenttime'].append(
2298 alldata['parenttime'].append(
2298 (data['p2.time'], b.hex(), p2.hex())
2299 (data['p2.time'], b.hex(), p2.hex())
2299 )
2300 )
2300 if p1missing or p2missing:
2301 if p1missing or p2missing:
2301 alldata['totalnbrenames'].append(
2302 alldata['totalnbrenames'].append(
2302 (
2303 (
2303 data['nbrenamedfiles'],
2304 data['nbrenamedfiles'],
2304 b.hex(),
2305 b.hex(),
2305 p1.hex(),
2306 p1.hex(),
2306 p2.hex(),
2307 p2.hex(),
2307 )
2308 )
2308 )
2309 )
2309 alldata['totaltime'].append(
2310 alldata['totaltime'].append(
2310 (data['time'], b.hex(), p1.hex(), p2.hex())
2311 (data['time'], b.hex(), p1.hex(), p2.hex())
2311 )
2312 )
2312 fm.startitem()
2313 fm.startitem()
2313 fm.data(**data)
2314 fm.data(**data)
2314 # make node pretty for the human output
2315 # make node pretty for the human output
2315 out = data.copy()
2316 out = data.copy()
2316 out['base'] = fm.hexfunc(b.node())
2317 out['base'] = fm.hexfunc(b.node())
2317 out['p1.node'] = fm.hexfunc(p1.node())
2318 out['p1.node'] = fm.hexfunc(p1.node())
2318 out['p2.node'] = fm.hexfunc(p2.node())
2319 out['p2.node'] = fm.hexfunc(p2.node())
2319 fm.plain(output % out)
2320 fm.plain(output % out)
2320
2321
2321 fm.end()
2322 fm.end()
2322 if dostats:
2323 if dostats:
2323 # use a second formatter because the data are quite different, not sure
2324 # use a second formatter because the data are quite different, not sure
2324 # how it flies with the templater.
2325 # how it flies with the templater.
2325 entries = [
2326 entries = [
2326 ('nbrevs', 'number of revisions covered'),
2327 ('nbrevs', 'number of revisions covered'),
2327 ('nbmissingfiles', 'number of missing files at head'),
2328 ('nbmissingfiles', 'number of missing files at head'),
2328 ]
2329 ]
2329 if dotiming:
2330 if dotiming:
2330 entries.append(
2331 entries.append(
2331 ('parentnbrenames', 'rename from one parent to base')
2332 ('parentnbrenames', 'rename from one parent to base')
2332 )
2333 )
2333 entries.append(('totalnbrenames', 'total number of renames'))
2334 entries.append(('totalnbrenames', 'total number of renames'))
2334 entries.append(('parenttime', 'time for one parent'))
2335 entries.append(('parenttime', 'time for one parent'))
2335 entries.append(('totaltime', 'time for both parents'))
2336 entries.append(('totaltime', 'time for both parents'))
2336 _displaystats(ui, opts, entries, alldata)
2337 _displaystats(ui, opts, entries, alldata)
2337
2338
2338
2339
2339 @command(
2340 @command(
2340 b'perf::helper-pathcopies|perfhelper-pathcopies',
2341 b'perf::helper-pathcopies|perfhelper-pathcopies',
2341 formatteropts
2342 formatteropts
2342 + [
2343 + [
2343 (b'r', b'revs', [], b'restrict search to these revisions'),
2344 (b'r', b'revs', [], b'restrict search to these revisions'),
2344 (b'', b'timing', False, b'provides extra data (costly)'),
2345 (b'', b'timing', False, b'provides extra data (costly)'),
2345 (b'', b'stats', False, b'provides statistic about the measured data'),
2346 (b'', b'stats', False, b'provides statistic about the measured data'),
2346 ],
2347 ],
2347 )
2348 )
2348 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2349 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2349 """find statistics about potential parameters for the `perftracecopies`
2350 """find statistics about potential parameters for the `perftracecopies`
2350
2351
2351 This command finds source-destination pairs relevant for copytracing testing.
2352 This command finds source-destination pairs relevant for copytracing testing.
2352 It reports values for some of the parameters that impact copy tracing time.
2353 It reports values for some of the parameters that impact copy tracing time.
2353
2354
2354 If `--timing` is set, rename detection is run and the associated timing
2355 If `--timing` is set, rename detection is run and the associated timing
2355 will be reported. The extra details come at the cost of slower command
2356 will be reported. The extra details come at the cost of slower command
2356 execution.
2357 execution.
2357
2358
2358 Since the rename detection is only run once, other factors might easily
2359 Since the rename detection is only run once, other factors might easily
2359 affect the precision of the timing. However it should give a good
2360 affect the precision of the timing. However it should give a good
2360 approximation of which revision pairs are very costly.
2361 approximation of which revision pairs are very costly.
2361 """
2362 """
2362 opts = _byteskwargs(opts)
2363 opts = _byteskwargs(opts)
2363 fm = ui.formatter(b'perf', opts)
2364 fm = ui.formatter(b'perf', opts)
2364 dotiming = opts[b'timing']
2365 dotiming = opts[b'timing']
2365 dostats = opts[b'stats']
2366 dostats = opts[b'stats']
2366
2367
2367 if dotiming:
2368 if dotiming:
2368 header = '%12s %12s %12s %12s %12s %12s\n'
2369 header = '%12s %12s %12s %12s %12s %12s\n'
2369 output = (
2370 output = (
2370 "%(source)12s %(destination)12s "
2371 "%(source)12s %(destination)12s "
2371 "%(nbrevs)12d %(nbmissingfiles)12d "
2372 "%(nbrevs)12d %(nbmissingfiles)12d "
2372 "%(nbrenamedfiles)12d %(time)18.5f\n"
2373 "%(nbrenamedfiles)12d %(time)18.5f\n"
2373 )
2374 )
2374 header_names = (
2375 header_names = (
2375 "source",
2376 "source",
2376 "destination",
2377 "destination",
2377 "nb-revs",
2378 "nb-revs",
2378 "nb-files",
2379 "nb-files",
2379 "nb-renames",
2380 "nb-renames",
2380 "time",
2381 "time",
2381 )
2382 )
2382 fm.plain(header % header_names)
2383 fm.plain(header % header_names)
2383 else:
2384 else:
2384 header = '%12s %12s %12s %12s\n'
2385 header = '%12s %12s %12s %12s\n'
2385 output = (
2386 output = (
2386 "%(source)12s %(destination)12s "
2387 "%(source)12s %(destination)12s "
2387 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2388 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2388 )
2389 )
2389 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2390 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2390
2391
2391 if not revs:
2392 if not revs:
2392 revs = ['all()']
2393 revs = ['all()']
2393 revs = scmutil.revrange(repo, revs)
2394 revs = scmutil.revrange(repo, revs)
2394
2395
2395 if dostats:
2396 if dostats:
2396 alldata = {
2397 alldata = {
2397 'nbrevs': [],
2398 'nbrevs': [],
2398 'nbmissingfiles': [],
2399 'nbmissingfiles': [],
2399 }
2400 }
2400 if dotiming:
2401 if dotiming:
2401 alldata['nbrenames'] = []
2402 alldata['nbrenames'] = []
2402 alldata['time'] = []
2403 alldata['time'] = []
2403
2404
2404 roi = repo.revs('merge() and %ld', revs)
2405 roi = repo.revs('merge() and %ld', revs)
2405 for r in roi:
2406 for r in roi:
2406 ctx = repo[r]
2407 ctx = repo[r]
2407 p1 = ctx.p1().rev()
2408 p1 = ctx.p1().rev()
2408 p2 = ctx.p2().rev()
2409 p2 = ctx.p2().rev()
2409 bases = repo.changelog._commonancestorsheads(p1, p2)
2410 bases = repo.changelog._commonancestorsheads(p1, p2)
2410 for p in (p1, p2):
2411 for p in (p1, p2):
2411 for b in bases:
2412 for b in bases:
2412 base = repo[b]
2413 base = repo[b]
2413 parent = repo[p]
2414 parent = repo[p]
2414 missing = copies._computeforwardmissing(base, parent)
2415 missing = copies._computeforwardmissing(base, parent)
2415 if not missing:
2416 if not missing:
2416 continue
2417 continue
2417 data = {
2418 data = {
2418 b'source': base.hex(),
2419 b'source': base.hex(),
2419 b'destination': parent.hex(),
2420 b'destination': parent.hex(),
2420 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2421 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2421 b'nbmissingfiles': len(missing),
2422 b'nbmissingfiles': len(missing),
2422 }
2423 }
2423 if dostats:
2424 if dostats:
2424 alldata['nbrevs'].append(
2425 alldata['nbrevs'].append(
2425 (
2426 (
2426 data['nbrevs'],
2427 data['nbrevs'],
2427 base.hex(),
2428 base.hex(),
2428 parent.hex(),
2429 parent.hex(),
2429 )
2430 )
2430 )
2431 )
2431 alldata['nbmissingfiles'].append(
2432 alldata['nbmissingfiles'].append(
2432 (
2433 (
2433 data['nbmissingfiles'],
2434 data['nbmissingfiles'],
2434 base.hex(),
2435 base.hex(),
2435 parent.hex(),
2436 parent.hex(),
2436 )
2437 )
2437 )
2438 )
2438 if dotiming:
2439 if dotiming:
2439 begin = util.timer()
2440 begin = util.timer()
2440 renames = copies.pathcopies(base, parent)
2441 renames = copies.pathcopies(base, parent)
2441 end = util.timer()
2442 end = util.timer()
2442 # not very stable timing since we did only one run
2443 # not very stable timing since we did only one run
2443 data['time'] = end - begin
2444 data['time'] = end - begin
2444 data['nbrenamedfiles'] = len(renames)
2445 data['nbrenamedfiles'] = len(renames)
2445 if dostats:
2446 if dostats:
2446 alldata['time'].append(
2447 alldata['time'].append(
2447 (
2448 (
2448 data['time'],
2449 data['time'],
2449 base.hex(),
2450 base.hex(),
2450 parent.hex(),
2451 parent.hex(),
2451 )
2452 )
2452 )
2453 )
2453 alldata['nbrenames'].append(
2454 alldata['nbrenames'].append(
2454 (
2455 (
2455 data['nbrenamedfiles'],
2456 data['nbrenamedfiles'],
2456 base.hex(),
2457 base.hex(),
2457 parent.hex(),
2458 parent.hex(),
2458 )
2459 )
2459 )
2460 )
2460 fm.startitem()
2461 fm.startitem()
2461 fm.data(**data)
2462 fm.data(**data)
2462 out = data.copy()
2463 out = data.copy()
2463 out['source'] = fm.hexfunc(base.node())
2464 out['source'] = fm.hexfunc(base.node())
2464 out['destination'] = fm.hexfunc(parent.node())
2465 out['destination'] = fm.hexfunc(parent.node())
2465 fm.plain(output % out)
2466 fm.plain(output % out)
2466
2467
2467 fm.end()
2468 fm.end()
2468 if dostats:
2469 if dostats:
2469 entries = [
2470 entries = [
2470 ('nbrevs', 'number of revisions covered'),
2471 ('nbrevs', 'number of revisions covered'),
2471 ('nbmissingfiles', 'number of missing files at head'),
2472 ('nbmissingfiles', 'number of missing files at head'),
2472 ]
2473 ]
2473 if dotiming:
2474 if dotiming:
2474 entries.append(('nbrenames', 'renamed files'))
2475 entries.append(('nbrenames', 'renamed files'))
2475 entries.append(('time', 'time'))
2476 entries.append(('time', 'time'))
2476 _displaystats(ui, opts, entries, alldata)
2477 _displaystats(ui, opts, entries, alldata)
2477
2478
2478
2479
2479 @command(b'perf::cca|perfcca', formatteropts)
2480 @command(b'perf::cca|perfcca', formatteropts)
2480 def perfcca(ui, repo, **opts):
2481 def perfcca(ui, repo, **opts):
2481 opts = _byteskwargs(opts)
2482 opts = _byteskwargs(opts)
2482 timer, fm = gettimer(ui, opts)
2483 timer, fm = gettimer(ui, opts)
2483 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2484 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2484 fm.end()
2485 fm.end()
2485
2486
2486
2487
2487 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2488 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2488 def perffncacheload(ui, repo, **opts):
2489 def perffncacheload(ui, repo, **opts):
2489 opts = _byteskwargs(opts)
2490 opts = _byteskwargs(opts)
2490 timer, fm = gettimer(ui, opts)
2491 timer, fm = gettimer(ui, opts)
2491 s = repo.store
2492 s = repo.store
2492
2493
2493 def d():
2494 def d():
2494 s.fncache._load()
2495 s.fncache._load()
2495
2496
2496 timer(d)
2497 timer(d)
2497 fm.end()
2498 fm.end()
2498
2499
2499
2500
2500 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2501 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2501 def perffncachewrite(ui, repo, **opts):
2502 def perffncachewrite(ui, repo, **opts):
2502 opts = _byteskwargs(opts)
2503 opts = _byteskwargs(opts)
2503 timer, fm = gettimer(ui, opts)
2504 timer, fm = gettimer(ui, opts)
2504 s = repo.store
2505 s = repo.store
2505 lock = repo.lock()
2506 lock = repo.lock()
2506 s.fncache._load()
2507 s.fncache._load()
2507 tr = repo.transaction(b'perffncachewrite')
2508 tr = repo.transaction(b'perffncachewrite')
2508 tr.addbackup(b'fncache')
2509 tr.addbackup(b'fncache')
2509
2510
2510 def d():
2511 def d():
2511 s.fncache._dirty = True
2512 s.fncache._dirty = True
2512 s.fncache.write(tr)
2513 s.fncache.write(tr)
2513
2514
2514 timer(d)
2515 timer(d)
2515 tr.close()
2516 tr.close()
2516 lock.release()
2517 lock.release()
2517 fm.end()
2518 fm.end()
2518
2519
2519
2520
2520 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2521 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2521 def perffncacheencode(ui, repo, **opts):
2522 def perffncacheencode(ui, repo, **opts):
2522 opts = _byteskwargs(opts)
2523 opts = _byteskwargs(opts)
2523 timer, fm = gettimer(ui, opts)
2524 timer, fm = gettimer(ui, opts)
2524 s = repo.store
2525 s = repo.store
2525 s.fncache._load()
2526 s.fncache._load()
2526
2527
2527 def d():
2528 def d():
2528 for p in s.fncache.entries:
2529 for p in s.fncache.entries:
2529 s.encode(p)
2530 s.encode(p)
2530
2531
2531 timer(d)
2532 timer(d)
2532 fm.end()
2533 fm.end()
2533
2534
2534
2535
2535 def _bdiffworker(q, blocks, xdiff, ready, done):
2536 def _bdiffworker(q, blocks, xdiff, ready, done):
2536 while not done.is_set():
2537 while not done.is_set():
2537 pair = q.get()
2538 pair = q.get()
2538 while pair is not None:
2539 while pair is not None:
2539 if xdiff:
2540 if xdiff:
2540 mdiff.bdiff.xdiffblocks(*pair)
2541 mdiff.bdiff.xdiffblocks(*pair)
2541 elif blocks:
2542 elif blocks:
2542 mdiff.bdiff.blocks(*pair)
2543 mdiff.bdiff.blocks(*pair)
2543 else:
2544 else:
2544 mdiff.textdiff(*pair)
2545 mdiff.textdiff(*pair)
2545 q.task_done()
2546 q.task_done()
2546 pair = q.get()
2547 pair = q.get()
2547 q.task_done() # for the None one
2548 q.task_done() # for the None one
2548 with ready:
2549 with ready:
2549 ready.wait()
2550 ready.wait()
2550
2551
2551
2552
2552 def _manifestrevision(repo, mnode):
2553 def _manifestrevision(repo, mnode):
2553 ml = repo.manifestlog
2554 ml = repo.manifestlog
2554
2555
2555 if util.safehasattr(ml, b'getstorage'):
2556 if util.safehasattr(ml, b'getstorage'):
2556 store = ml.getstorage(b'')
2557 store = ml.getstorage(b'')
2557 else:
2558 else:
2558 store = ml._revlog
2559 store = ml._revlog
2559
2560
2560 return store.revision(mnode)
2561 return store.revision(mnode)
2561
2562
2562
2563
2563 @command(
2564 @command(
2564 b'perf::bdiff|perfbdiff',
2565 b'perf::bdiff|perfbdiff',
2565 revlogopts
2566 revlogopts
2566 + formatteropts
2567 + formatteropts
2567 + [
2568 + [
2568 (
2569 (
2569 b'',
2570 b'',
2570 b'count',
2571 b'count',
2571 1,
2572 1,
2572 b'number of revisions to test (when using --startrev)',
2573 b'number of revisions to test (when using --startrev)',
2573 ),
2574 ),
2574 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2575 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2575 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
2576 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
2576 (b'', b'blocks', False, b'test computing diffs into blocks'),
2577 (b'', b'blocks', False, b'test computing diffs into blocks'),
2577 (b'', b'xdiff', False, b'use xdiff algorithm'),
2578 (b'', b'xdiff', False, b'use xdiff algorithm'),
2578 ],
2579 ],
2579 b'-c|-m|FILE REV',
2580 b'-c|-m|FILE REV',
2580 )
2581 )
2581 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2582 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2582 """benchmark a bdiff between revisions
2583 """benchmark a bdiff between revisions
2583
2584
2584 By default, benchmark a bdiff between its delta parent and itself.
2585 By default, benchmark a bdiff between its delta parent and itself.
2585
2586
2586 With ``--count``, benchmark bdiffs between delta parents and self for N
2587 With ``--count``, benchmark bdiffs between delta parents and self for N
2587 revisions starting at the specified revision.
2588 revisions starting at the specified revision.
2588
2589
2589 With ``--alldata``, assume the requested revision is a changeset and
2590 With ``--alldata``, assume the requested revision is a changeset and
2590 measure bdiffs for all changes related to that changeset (manifest
2591 measure bdiffs for all changes related to that changeset (manifest
2591 and filelogs).
2592 and filelogs).
2592 """
2593 """
2593 opts = _byteskwargs(opts)
2594 opts = _byteskwargs(opts)
2594
2595
2595 if opts[b'xdiff'] and not opts[b'blocks']:
2596 if opts[b'xdiff'] and not opts[b'blocks']:
2596 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2597 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2597
2598
2598 if opts[b'alldata']:
2599 if opts[b'alldata']:
2599 opts[b'changelog'] = True
2600 opts[b'changelog'] = True
2600
2601
2601 if opts.get(b'changelog') or opts.get(b'manifest'):
2602 if opts.get(b'changelog') or opts.get(b'manifest'):
2602 file_, rev = None, file_
2603 file_, rev = None, file_
2603 elif rev is None:
2604 elif rev is None:
2604 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2605 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2605
2606
2606 blocks = opts[b'blocks']
2607 blocks = opts[b'blocks']
2607 xdiff = opts[b'xdiff']
2608 xdiff = opts[b'xdiff']
2608 textpairs = []
2609 textpairs = []
2609
2610
2610 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2611 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2611
2612
2612 startrev = r.rev(r.lookup(rev))
2613 startrev = r.rev(r.lookup(rev))
2613 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2614 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2614 if opts[b'alldata']:
2615 if opts[b'alldata']:
2615 # Load revisions associated with changeset.
2616 # Load revisions associated with changeset.
2616 ctx = repo[rev]
2617 ctx = repo[rev]
2617 mtext = _manifestrevision(repo, ctx.manifestnode())
2618 mtext = _manifestrevision(repo, ctx.manifestnode())
2618 for pctx in ctx.parents():
2619 for pctx in ctx.parents():
2619 pman = _manifestrevision(repo, pctx.manifestnode())
2620 pman = _manifestrevision(repo, pctx.manifestnode())
2620 textpairs.append((pman, mtext))
2621 textpairs.append((pman, mtext))
2621
2622
2622 # Load filelog revisions by iterating manifest delta.
2623 # Load filelog revisions by iterating manifest delta.
2623 man = ctx.manifest()
2624 man = ctx.manifest()
2624 pman = ctx.p1().manifest()
2625 pman = ctx.p1().manifest()
2625 for filename, change in pman.diff(man).items():
2626 for filename, change in pman.diff(man).items():
2626 fctx = repo.file(filename)
2627 fctx = repo.file(filename)
2627 f1 = fctx.revision(change[0][0] or -1)
2628 f1 = fctx.revision(change[0][0] or -1)
2628 f2 = fctx.revision(change[1][0] or -1)
2629 f2 = fctx.revision(change[1][0] or -1)
2629 textpairs.append((f1, f2))
2630 textpairs.append((f1, f2))
2630 else:
2631 else:
2631 dp = r.deltaparent(rev)
2632 dp = r.deltaparent(rev)
2632 textpairs.append((r.revision(dp), r.revision(rev)))
2633 textpairs.append((r.revision(dp), r.revision(rev)))
2633
2634
2634 withthreads = threads > 0
2635 withthreads = threads > 0
2635 if not withthreads:
2636 if not withthreads:
2636
2637
2637 def d():
2638 def d():
2638 for pair in textpairs:
2639 for pair in textpairs:
2639 if xdiff:
2640 if xdiff:
2640 mdiff.bdiff.xdiffblocks(*pair)
2641 mdiff.bdiff.xdiffblocks(*pair)
2641 elif blocks:
2642 elif blocks:
2642 mdiff.bdiff.blocks(*pair)
2643 mdiff.bdiff.blocks(*pair)
2643 else:
2644 else:
2644 mdiff.textdiff(*pair)
2645 mdiff.textdiff(*pair)
2645
2646
2646 else:
2647 else:
2647 q = queue()
2648 q = queue()
2648 for i in _xrange(threads):
2649 for i in _xrange(threads):
2649 q.put(None)
2650 q.put(None)
2650 ready = threading.Condition()
2651 ready = threading.Condition()
2651 done = threading.Event()
2652 done = threading.Event()
2652 for i in _xrange(threads):
2653 for i in _xrange(threads):
2653 threading.Thread(
2654 threading.Thread(
2654 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2655 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2655 ).start()
2656 ).start()
2656 q.join()
2657 q.join()
2657
2658
2658 def d():
2659 def d():
2659 for pair in textpairs:
2660 for pair in textpairs:
2660 q.put(pair)
2661 q.put(pair)
2661 for i in _xrange(threads):
2662 for i in _xrange(threads):
2662 q.put(None)
2663 q.put(None)
2663 with ready:
2664 with ready:
2664 ready.notify_all()
2665 ready.notify_all()
2665 q.join()
2666 q.join()
2666
2667
2667 timer, fm = gettimer(ui, opts)
2668 timer, fm = gettimer(ui, opts)
2668 timer(d)
2669 timer(d)
2669 fm.end()
2670 fm.end()
2670
2671
2671 if withthreads:
2672 if withthreads:
2672 done.set()
2673 done.set()
2673 for i in _xrange(threads):
2674 for i in _xrange(threads):
2674 q.put(None)
2675 q.put(None)
2675 with ready:
2676 with ready:
2676 ready.notify_all()
2677 ready.notify_all()
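A stripped-down sketch of the sentinel-driven worker pool used by ``perfbdiff`` above; it is illustrative only, since the real code additionally parks each worker on a Condition between timed runs:

import threading
from queue import Queue

q = Queue()

def worker():
    while True:
        item = q.get()
        q.task_done()
        if item is None:  # one None sentinel per worker ends the run
            break

threading.Thread(target=worker).start()
q.put(b'pair-of-texts')
q.put(None)
q.join()  # returns once both items have been marked done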
2677
2678
2678
2679
2679 @command(
2680 @command(
2680 b'perf::unbundle',
2681 b'perf::unbundle',
2681 formatteropts,
2682 formatteropts,
2682 b'BUNDLE_FILE',
2683 b'BUNDLE_FILE',
2683 )
2684 )
2684 def perf_unbundle(ui, repo, fname, **opts):
2685 def perf_unbundle(ui, repo, fname, **opts):
2685 """benchmark application of a bundle in a repository.
2686 """benchmark application of a bundle in a repository.
2686
2687
2687 This does not include the final transaction processing"""
2688 This does not include the final transaction processing"""
2688
2689
2689 from mercurial import exchange
2690 from mercurial import exchange
2690 from mercurial import bundle2
2691 from mercurial import bundle2
2691 from mercurial import transaction
2692 from mercurial import transaction
2692
2693
2693 opts = _byteskwargs(opts)
2694 opts = _byteskwargs(opts)
2694
2695
2695 ### some compatibility hotfix
2696 ### some compatibility hotfix
2696 #
2697 #
2697 # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
2698 # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
2698 # critical regression that breaks transaction rollback for files that are
2699 # critical regression that breaks transaction rollback for files that are
2699 # de-inlined.
2700 # de-inlined.
2700 method = transaction.transaction._addentry
2701 method = transaction.transaction._addentry
2701 pre_63edc384d3b7 = "data" in getargspec(method).args
2702 pre_63edc384d3b7 = "data" in getargspec(method).args
2702 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
2703 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
2703 # a changeset that is a close descendant of 18415fc918a1, the changeset
2704 # a changeset that is a close descendant of 18415fc918a1, the changeset
2704 # that concludes the fix run for the bug introduced in 63edc384d3b7.
2705 # that concludes the fix run for the bug introduced in 63edc384d3b7.
2705 args = getargspec(error.Abort.__init__).args
2706 args = getargspec(error.Abort.__init__).args
2706 post_18415fc918a1 = "detailed_exit_code" in args
2707 post_18415fc918a1 = "detailed_exit_code" in args
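Both probes above detect Mercurial behaviour by inspecting function signatures rather than comparing version numbers; a self-contained sketch of the same idiom using only the standard library (the helper and sample function names are made up):

import inspect

def has_argument(func, name):
    """Return True if `func` accepts a parameter called `name`."""
    return name in inspect.signature(func).parameters

def log(ui, repo, rev=None, copies=False):
    pass

has_argument(log, 'copies')  # True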
2707
2708
2708 old_max_inline = None
2709 old_max_inline = None
2709 try:
2710 try:
2710 if not (pre_63edc384d3b7 or post_18415fc918a1):
2711 if not (pre_63edc384d3b7 or post_18415fc918a1):
2711 # disable inlining
2712 # disable inlining
2712 old_max_inline = mercurial.revlog._maxinline
2713 old_max_inline = mercurial.revlog._maxinline
2713 # large enough to never happen
2714 # large enough to never happen
2714 mercurial.revlog._maxinline = 2 ** 50
2715 mercurial.revlog._maxinline = 2 ** 50
2715
2716
2716 with repo.lock():
2717 with repo.lock():
2717 bundle = [None, None]
2718 bundle = [None, None]
2718 orig_quiet = repo.ui.quiet
2719 orig_quiet = repo.ui.quiet
2719 try:
2720 try:
2720 repo.ui.quiet = True
2721 repo.ui.quiet = True
2721 with open(fname, mode="rb") as f:
2722 with open(fname, mode="rb") as f:
2722
2723
2723 def noop_report(*args, **kwargs):
2724 def noop_report(*args, **kwargs):
2724 pass
2725 pass
2725
2726
2726 def setup():
2727 def setup():
2727 gen, tr = bundle
2728 gen, tr = bundle
2728 if tr is not None:
2729 if tr is not None:
2729 tr.abort()
2730 tr.abort()
2730 bundle[:] = [None, None]
2731 bundle[:] = [None, None]
2731 f.seek(0)
2732 f.seek(0)
2732 bundle[0] = exchange.readbundle(ui, f, fname)
2733 bundle[0] = exchange.readbundle(ui, f, fname)
2733 bundle[1] = repo.transaction(b'perf::unbundle')
2734 bundle[1] = repo.transaction(b'perf::unbundle')
2734 # silence the transaction
2735 # silence the transaction
2735 bundle[1]._report = noop_report
2736 bundle[1]._report = noop_report
2736
2737
2737 def apply():
2738 def apply():
2738 gen, tr = bundle
2739 gen, tr = bundle
2739 bundle2.applybundle(
2740 bundle2.applybundle(
2740 repo,
2741 repo,
2741 gen,
2742 gen,
2742 tr,
2743 tr,
2743 source=b'perf::unbundle',
2744 source=b'perf::unbundle',
2744 url=fname,
2745 url=fname,
2745 )
2746 )
2746
2747
2747 timer, fm = gettimer(ui, opts)
2748 timer, fm = gettimer(ui, opts)
2748 timer(apply, setup=setup)
2749 timer(apply, setup=setup)
2749 fm.end()
2750 fm.end()
2750 finally:
2751 finally:
2751 repo.ui.quiet = orig_quiet
2752 repo.ui.quiet = orig_quiet
2752 gen, tr = bundle
2753 gen, tr = bundle
2753 if tr is not None:
2754 if tr is not None:
2754 tr.abort()
2755 tr.abort()
2755 finally:
2756 finally:
2756 if old_max_inline is not None:
2757 if old_max_inline is not None:
2757 mercurial.revlog._maxinline = old_max_inline
2758 mercurial.revlog._maxinline = old_max_inline
2758
2759
2759
2760
2760 @command(
2761 @command(
2761 b'perf::unidiff|perfunidiff',
2762 b'perf::unidiff|perfunidiff',
2762 revlogopts
2763 revlogopts
2763 + formatteropts
2764 + formatteropts
2764 + [
2765 + [
2765 (
2766 (
2766 b'',
2767 b'',
2767 b'count',
2768 b'count',
2768 1,
2769 1,
2769 b'number of revisions to test (when using --startrev)',
2770 b'number of revisions to test (when using --startrev)',
2770 ),
2771 ),
2771 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2772 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2772 ],
2773 ],
2773 b'-c|-m|FILE REV',
2774 b'-c|-m|FILE REV',
2774 )
2775 )
2775 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2776 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2776 """benchmark a unified diff between revisions
2777 """benchmark a unified diff between revisions
2777
2778
2778 This doesn't include any copy tracing - it's just a unified diff
2779 This doesn't include any copy tracing - it's just a unified diff
2779 of the texts.
2780 of the texts.
2780
2781
2781 By default, benchmark a diff between its delta parent and itself.
2782 By default, benchmark a diff between its delta parent and itself.
2782
2783
2783 With ``--count``, benchmark diffs between delta parents and self for N
2784 With ``--count``, benchmark diffs between delta parents and self for N
2784 revisions starting at the specified revision.
2785 revisions starting at the specified revision.
2785
2786
2786 With ``--alldata``, assume the requested revision is a changeset and
2787 With ``--alldata``, assume the requested revision is a changeset and
2787 measure diffs for all changes related to that changeset (manifest
2788 measure diffs for all changes related to that changeset (manifest
2788 and filelogs).
2789 and filelogs).
2789 """
2790 """
2790 opts = _byteskwargs(opts)
2791 opts = _byteskwargs(opts)
2791 if opts[b'alldata']:
2792 if opts[b'alldata']:
2792 opts[b'changelog'] = True
2793 opts[b'changelog'] = True
2793
2794
2794 if opts.get(b'changelog') or opts.get(b'manifest'):
2795 if opts.get(b'changelog') or opts.get(b'manifest'):
2795 file_, rev = None, file_
2796 file_, rev = None, file_
2796 elif rev is None:
2797 elif rev is None:
2797 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2798 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2798
2799
2799 textpairs = []
2800 textpairs = []
2800
2801
2801 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2802 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2802
2803
2803 startrev = r.rev(r.lookup(rev))
2804 startrev = r.rev(r.lookup(rev))
2804 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2805 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2805 if opts[b'alldata']:
2806 if opts[b'alldata']:
2806 # Load revisions associated with changeset.
2807 # Load revisions associated with changeset.
2807 ctx = repo[rev]
2808 ctx = repo[rev]
2808 mtext = _manifestrevision(repo, ctx.manifestnode())
2809 mtext = _manifestrevision(repo, ctx.manifestnode())
2809 for pctx in ctx.parents():
2810 for pctx in ctx.parents():
2810 pman = _manifestrevision(repo, pctx.manifestnode())
2811 pman = _manifestrevision(repo, pctx.manifestnode())
2811 textpairs.append((pman, mtext))
2812 textpairs.append((pman, mtext))
2812
2813
2813 # Load filelog revisions by iterating manifest delta.
2814 # Load filelog revisions by iterating manifest delta.
2814 man = ctx.manifest()
2815 man = ctx.manifest()
2815 pman = ctx.p1().manifest()
2816 pman = ctx.p1().manifest()
2816 for filename, change in pman.diff(man).items():
2817 for filename, change in pman.diff(man).items():
2817 fctx = repo.file(filename)
2818 fctx = repo.file(filename)
2818 f1 = fctx.revision(change[0][0] or -1)
2819 f1 = fctx.revision(change[0][0] or -1)
2819 f2 = fctx.revision(change[1][0] or -1)
2820 f2 = fctx.revision(change[1][0] or -1)
2820 textpairs.append((f1, f2))
2821 textpairs.append((f1, f2))
2821 else:
2822 else:
2822 dp = r.deltaparent(rev)
2823 dp = r.deltaparent(rev)
2823 textpairs.append((r.revision(dp), r.revision(rev)))
2824 textpairs.append((r.revision(dp), r.revision(rev)))
2824
2825
2825 def d():
2826 def d():
2826 for left, right in textpairs:
2827 for left, right in textpairs:
2827 # The date strings don't matter, so we pass empty strings.
2828 # The date strings don't matter, so we pass empty strings.
2828 headerlines, hunks = mdiff.unidiff(
2829 headerlines, hunks = mdiff.unidiff(
2829 left, b'', right, b'', b'left', b'right', binary=False
2830 left, b'', right, b'', b'left', b'right', binary=False
2830 )
2831 )
2831 # consume iterators in roughly the way patch.py does
2832 # consume iterators in roughly the way patch.py does
2832 b'\n'.join(headerlines)
2833 b'\n'.join(headerlines)
2833 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2834 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2834
2835
2835 timer, fm = gettimer(ui, opts)
2836 timer, fm = gettimer(ui, opts)
2836 timer(d)
2837 timer(d)
2837 fm.end()
2838 fm.end()
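The hunk-consuming line above flattens a sequence of line lists with ``sum(..., [])``; a tiny standalone illustration of that idiom (the sample hunks are invented):

chunks = [[b'@@ -1 +1 @@\n'], [b'-old\n', b'+new\n']]
assert sum(chunks, []) == [b'@@ -1 +1 @@\n', b'-old\n', b'+new\n']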
2838
2839
2839
2840
2840 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2841 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2841 def perfdiffwd(ui, repo, **opts):
2842 def perfdiffwd(ui, repo, **opts):
2842 """Profile diff of working directory changes"""
2843 """Profile diff of working directory changes"""
2843 opts = _byteskwargs(opts)
2844 opts = _byteskwargs(opts)
2844 timer, fm = gettimer(ui, opts)
2845 timer, fm = gettimer(ui, opts)
2845 options = {
2846 options = {
2846 'w': 'ignore_all_space',
2847 'w': 'ignore_all_space',
2847 'b': 'ignore_space_change',
2848 'b': 'ignore_space_change',
2848 'B': 'ignore_blank_lines',
2849 'B': 'ignore_blank_lines',
2849 }
2850 }
2850
2851
2851 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2852 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2852 opts = {options[c]: b'1' for c in diffopt}
2853 opts = {options[c]: b'1' for c in diffopt}
2853
2854
2854 def d():
2855 def d():
2855 ui.pushbuffer()
2856 ui.pushbuffer()
2856 commands.diff(ui, repo, **opts)
2857 commands.diff(ui, repo, **opts)
2857 ui.popbuffer()
2858 ui.popbuffer()
2858
2859
2859 diffopt = diffopt.encode('ascii')
2860 diffopt = diffopt.encode('ascii')
2860 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2861 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2861 timer(d, title=title)
2862 timer(d, title=title)
2862 fm.end()
2863 fm.end()
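For reference, the flag-to-option expansion inside the loop above turns a combination such as ``wB`` into keyword arguments for ``commands.diff``; a hedged, standalone illustration:

options = {
    'w': 'ignore_all_space',
    'b': 'ignore_space_change',
    'B': 'ignore_blank_lines',
}
assert {options[c]: b'1' for c in 'wB'} == {
    'ignore_all_space': b'1',
    'ignore_blank_lines': b'1',
}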
2863
2864
2864
2865
2865 @command(
2866 @command(
2866 b'perf::revlogindex|perfrevlogindex',
2867 b'perf::revlogindex|perfrevlogindex',
2867 revlogopts + formatteropts,
2868 revlogopts + formatteropts,
2868 b'-c|-m|FILE',
2869 b'-c|-m|FILE',
2869 )
2870 )
2870 def perfrevlogindex(ui, repo, file_=None, **opts):
2871 def perfrevlogindex(ui, repo, file_=None, **opts):
2871 """Benchmark operations against a revlog index.
2872 """Benchmark operations against a revlog index.
2872
2873
2873 This tests constructing a revlog instance, reading index data,
2874 This tests constructing a revlog instance, reading index data,
2874 parsing index data, and performing various operations related to
2875 parsing index data, and performing various operations related to
2875 index data.
2876 index data.
2876 """
2877 """
2877
2878
2878 opts = _byteskwargs(opts)
2879 opts = _byteskwargs(opts)
2879
2880
2880 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2881 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2881
2882
2882 opener = getattr(rl, 'opener') # trick linter
2883 opener = getattr(rl, 'opener') # trick linter
2883 # compat with hg <= 5.8
2884 # compat with hg <= 5.8
2884 radix = getattr(rl, 'radix', None)
2885 radix = getattr(rl, 'radix', None)
2885 indexfile = getattr(rl, '_indexfile', None)
2886 indexfile = getattr(rl, '_indexfile', None)
2886 if indexfile is None:
2887 if indexfile is None:
2887 # compatibility with <= hg-5.8
2888 # compatibility with <= hg-5.8
2888 indexfile = getattr(rl, 'indexfile')
2889 indexfile = getattr(rl, 'indexfile')
2889 data = opener.read(indexfile)
2890 data = opener.read(indexfile)
2890
2891
2891 header = struct.unpack(b'>I', data[0:4])[0]
2892 header = struct.unpack(b'>I', data[0:4])[0]
2892 version = header & 0xFFFF
2893 version = header & 0xFFFF
2893 if version == 1:
2894 if version == 1:
2894 inline = header & (1 << 16)
2895 inline = header & (1 << 16)
2895 else:
2896 else:
2896 raise error.Abort(b'unsupported revlog version: %d' % version)
2897 raise error.Abort(b'unsupported revlog version: %d' % version)
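As the unpacking above suggests, the first four bytes of a revlog index pack the feature flags into the high 16 bits and the format version into the low 16 bits; a small self-contained check (the byte string is invented for illustration):

import struct

header = struct.unpack(b'>I', b'\x00\x01\x00\x01')[0]
assert header & 0xFFFF == 1   # revlog format version 1
assert header & (1 << 16)     # the inline-data flag is set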
2897
2898
2898 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2899 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2899 if parse_index_v1 is None:
2900 if parse_index_v1 is None:
2900 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2901 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2901
2902
2902 rllen = len(rl)
2903 rllen = len(rl)
2903
2904
2904 node0 = rl.node(0)
2905 node0 = rl.node(0)
2905 node25 = rl.node(rllen // 4)
2906 node25 = rl.node(rllen // 4)
2906 node50 = rl.node(rllen // 2)
2907 node50 = rl.node(rllen // 2)
2907 node75 = rl.node(rllen // 4 * 3)
2908 node75 = rl.node(rllen // 4 * 3)
2908 node100 = rl.node(rllen - 1)
2909 node100 = rl.node(rllen - 1)
2909
2910
2910 allrevs = range(rllen)
2911 allrevs = range(rllen)
2911 allrevsrev = list(reversed(allrevs))
2912 allrevsrev = list(reversed(allrevs))
2912 allnodes = [rl.node(rev) for rev in range(rllen)]
2913 allnodes = [rl.node(rev) for rev in range(rllen)]
2913 allnodesrev = list(reversed(allnodes))
2914 allnodesrev = list(reversed(allnodes))
2914
2915
2915 def constructor():
2916 def constructor():
2916 if radix is not None:
2917 if radix is not None:
2917 revlog(opener, radix=radix)
2918 revlog(opener, radix=radix)
2918 else:
2919 else:
2919 # hg <= 5.8
2920 # hg <= 5.8
2920 revlog(opener, indexfile=indexfile)
2921 revlog(opener, indexfile=indexfile)
2921
2922
2922 def read():
2923 def read():
2923 with opener(indexfile) as fh:
2924 with opener(indexfile) as fh:
2924 fh.read()
2925 fh.read()
2925
2926
2926 def parseindex():
2927 def parseindex():
2927 parse_index_v1(data, inline)
2928 parse_index_v1(data, inline)
2928
2929
2929 def getentry(revornode):
2930 def getentry(revornode):
2930 index = parse_index_v1(data, inline)[0]
2931 index = parse_index_v1(data, inline)[0]
2931 index[revornode]
2932 index[revornode]
2932
2933
2933 def getentries(revs, count=1):
2934 def getentries(revs, count=1):
2934 index = parse_index_v1(data, inline)[0]
2935 index = parse_index_v1(data, inline)[0]
2935
2936
2936 for i in range(count):
2937 for i in range(count):
2937 for rev in revs:
2938 for rev in revs:
2938 index[rev]
2939 index[rev]
2939
2940
2940 def resolvenode(node):
2941 def resolvenode(node):
2941 index = parse_index_v1(data, inline)[0]
2942 index = parse_index_v1(data, inline)[0]
2942 rev = getattr(index, 'rev', None)
2943 rev = getattr(index, 'rev', None)
2943 if rev is None:
2944 if rev is None:
2944 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2945 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2945 # This only works for the C code.
2946 # This only works for the C code.
2946 if nodemap is None:
2947 if nodemap is None:
2947 return
2948 return
2948 rev = nodemap.__getitem__
2949 rev = nodemap.__getitem__
2949
2950
2950 try:
2951 try:
2951 rev(node)
2952 rev(node)
2952 except error.RevlogError:
2953 except error.RevlogError:
2953 pass
2954 pass
2954
2955
2955 def resolvenodes(nodes, count=1):
2956 def resolvenodes(nodes, count=1):
2956 index = parse_index_v1(data, inline)[0]
2957 index = parse_index_v1(data, inline)[0]
2957 rev = getattr(index, 'rev', None)
2958 rev = getattr(index, 'rev', None)
2958 if rev is None:
2959 if rev is None:
2959 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2960 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2960 # This only works for the C code.
2961 # This only works for the C code.
2961 if nodemap is None:
2962 if nodemap is None:
2962 return
2963 return
2963 rev = nodemap.__getitem__
2964 rev = nodemap.__getitem__
2964
2965
2965 for i in range(count):
2966 for i in range(count):
2966 for node in nodes:
2967 for node in nodes:
2967 try:
2968 try:
2968 rev(node)
2969 rev(node)
2969 except error.RevlogError:
2970 except error.RevlogError:
2970 pass
2971 pass
2971
2972
2972 benches = [
2973 benches = [
2973 (constructor, b'revlog constructor'),
2974 (constructor, b'revlog constructor'),
2974 (read, b'read'),
2975 (read, b'read'),
2975 (parseindex, b'create index object'),
2976 (parseindex, b'create index object'),
2976 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2977 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2977 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2978 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2978 (lambda: resolvenode(node0), b'look up node at rev 0'),
2979 (lambda: resolvenode(node0), b'look up node at rev 0'),
2979 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2980 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2980 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2981 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2981 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2982 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2982 (lambda: resolvenode(node100), b'look up node at tip'),
2983 (lambda: resolvenode(node100), b'look up node at tip'),
2983 # 2x variation is to measure caching impact.
2984 # 2x variation is to measure caching impact.
2984 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2985 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2985 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2986 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2986 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2987 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2987 (
2988 (
2988 lambda: resolvenodes(allnodesrev, 2),
2989 lambda: resolvenodes(allnodesrev, 2),
2989 b'look up all nodes 2x (reverse)',
2990 b'look up all nodes 2x (reverse)',
2990 ),
2991 ),
2991 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2992 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2992 (
2993 (
2993 lambda: getentries(allrevs, 2),
2994 lambda: getentries(allrevs, 2),
2994 b'retrieve all index entries 2x (forward)',
2995 b'retrieve all index entries 2x (forward)',
2995 ),
2996 ),
2996 (
2997 (
2997 lambda: getentries(allrevsrev),
2998 lambda: getentries(allrevsrev),
2998 b'retrieve all index entries (reverse)',
2999 b'retrieve all index entries (reverse)',
2999 ),
3000 ),
3000 (
3001 (
3001 lambda: getentries(allrevsrev, 2),
3002 lambda: getentries(allrevsrev, 2),
3002 b'retrieve all index entries 2x (reverse)',
3003 b'retrieve all index entries 2x (reverse)',
3003 ),
3004 ),
3004 ]
3005 ]
3005
3006
3006 for fn, title in benches:
3007 for fn, title in benches:
3007 timer, fm = gettimer(ui, opts)
3008 timer, fm = gettimer(ui, opts)
3008 timer(fn, title=title)
3009 timer(fn, title=title)
3009 fm.end()
3010 fm.end()
3010
3011
3011
3012
3012 @command(
3013 @command(
3013 b'perf::revlogrevisions|perfrevlogrevisions',
3014 b'perf::revlogrevisions|perfrevlogrevisions',
3014 revlogopts
3015 revlogopts
3015 + formatteropts
3016 + formatteropts
3016 + [
3017 + [
3017 (b'd', b'dist', 100, b'distance between the revisions'),
3018 (b'd', b'dist', 100, b'distance between the revisions'),
3018 (b's', b'startrev', 0, b'revision to start reading at'),
3019 (b's', b'startrev', 0, b'revision to start reading at'),
3019 (b'', b'reverse', False, b'read in reverse'),
3020 (b'', b'reverse', False, b'read in reverse'),
3020 ],
3021 ],
3021 b'-c|-m|FILE',
3022 b'-c|-m|FILE',
3022 )
3023 )
3023 def perfrevlogrevisions(
3024 def perfrevlogrevisions(
3024 ui, repo, file_=None, startrev=0, reverse=False, **opts
3025 ui, repo, file_=None, startrev=0, reverse=False, **opts
3025 ):
3026 ):
3026 """Benchmark reading a series of revisions from a revlog.
3027 """Benchmark reading a series of revisions from a revlog.
3027
3028
3028 By default, we read every ``-d/--dist`` revision from 0 to tip of
3029 By default, we read every ``-d/--dist`` revision from 0 to tip of
3029 the specified revlog.
3030 the specified revlog.
3030
3031
3031 The start revision can be defined via ``-s/--startrev``.
3032 The start revision can be defined via ``-s/--startrev``.
3032 """
3033 """
3033 opts = _byteskwargs(opts)
3034 opts = _byteskwargs(opts)
3034
3035
3035 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3036 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3036 rllen = getlen(ui)(rl)
3037 rllen = getlen(ui)(rl)
3037
3038
3038 if startrev < 0:
3039 if startrev < 0:
3039 startrev = rllen + startrev
3040 startrev = rllen + startrev
3040
3041
3041 def d():
3042 def d():
3042 rl.clearcaches()
3043 rl.clearcaches()
3043
3044
3044 beginrev = startrev
3045 beginrev = startrev
3045 endrev = rllen
3046 endrev = rllen
3046 dist = opts[b'dist']
3047 dist = opts[b'dist']
3047
3048
3048 if reverse:
3049 if reverse:
3049 beginrev, endrev = endrev - 1, beginrev - 1
3050 beginrev, endrev = endrev - 1, beginrev - 1
3050 dist = -1 * dist
3051 dist = -1 * dist
3051
3052
3052 for x in _xrange(beginrev, endrev, dist):
3053 for x in _xrange(beginrev, endrev, dist):
3053 # Old revisions don't support passing int.
3054 # Old revisions don't support passing int.
3054 n = rl.node(x)
3055 n = rl.node(x)
3055 rl.revision(n)
3056 rl.revision(n)
3056
3057
3057 timer, fm = gettimer(ui, opts)
3058 timer, fm = gettimer(ui, opts)
3058 timer(d)
3059 timer(d)
3059 fm.end()
3060 fm.end()
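A quick check of the stride arithmetic used by the benchmark above, assuming a ten-revision revlog and ``--dist 4`` (illustrative only, not part of perf.py):

assert list(range(0, 10, 4)) == [0, 4, 8]    # forward walk
assert list(range(9, -1, -4)) == [9, 5, 1]   # with --reverse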
3060
3061
3061
3062
3062 @command(
3063 @command(
3063 b'perf::revlogwrite|perfrevlogwrite',
3064 b'perf::revlogwrite|perfrevlogwrite',
3064 revlogopts
3065 revlogopts
3065 + formatteropts
3066 + formatteropts
3066 + [
3067 + [
3067 (b's', b'startrev', 1000, b'revision to start writing at'),
3068 (b's', b'startrev', 1000, b'revision to start writing at'),
3068 (b'', b'stoprev', -1, b'last revision to write'),
3069 (b'', b'stoprev', -1, b'last revision to write'),
3069 (b'', b'count', 3, b'number of passes to perform'),
3070 (b'', b'count', 3, b'number of passes to perform'),
3070 (b'', b'details', False, b'print timing for every revision tested'),
3071 (b'', b'details', False, b'print timing for every revision tested'),
3071 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
3072 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
3072 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3073 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3073 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3074 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3074 ],
3075 ],
3075 b'-c|-m|FILE',
3076 b'-c|-m|FILE',
3076 )
3077 )
3077 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3078 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3078 """Benchmark writing a series of revisions to a revlog.
3079 """Benchmark writing a series of revisions to a revlog.
3079
3080
3080 Possible source values are:
3081 Possible source values are:
3081 * `full`: add from a full text (default).
3082 * `full`: add from a full text (default).
3082 * `parent-1`: add from a delta to the first parent
3083 * `parent-1`: add from a delta to the first parent
3083 * `parent-2`: add from a delta to the second parent if it exists
3084 * `parent-2`: add from a delta to the second parent if it exists
3084 (use a delta from the first parent otherwise)
3085 (use a delta from the first parent otherwise)
3085 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3086 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3086 * `storage`: add from the existing precomputed deltas
3087 * `storage`: add from the existing precomputed deltas
3087
3088
3088 Note: This command measures performance in a custom way. As a
3089 Note: This command measures performance in a custom way. As a
3089 result, some of the global configuration of the 'perf' command does not
3090 result, some of the global configuration of the 'perf' command does not
3090 apply to it:
3091 apply to it:
3091
3092
3092 * ``pre-run``: disabled
3093 * ``pre-run``: disabled
3093
3094
3094 * ``profile-benchmark``: disabled
3095 * ``profile-benchmark``: disabled
3095
3096
3096 * ``run-limits``: disabled, use --count instead
3097 * ``run-limits``: disabled, use --count instead
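
Example (illustrative invocation; any -c|-m|FILE target works):

$ hg perfrevlogwrite -m --source parent-smallest --count 5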
3097 """
3098 """
3098 opts = _byteskwargs(opts)
3099 opts = _byteskwargs(opts)
3099
3100
3100 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3101 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3101 rllen = getlen(ui)(rl)
3102 rllen = getlen(ui)(rl)
3102 if startrev < 0:
3103 if startrev < 0:
3103 startrev = rllen + startrev
3104 startrev = rllen + startrev
3104 if stoprev < 0:
3105 if stoprev < 0:
3105 stoprev = rllen + stoprev
3106 stoprev = rllen + stoprev
3106
3107
3107 lazydeltabase = opts['lazydeltabase']
3108 lazydeltabase = opts['lazydeltabase']
3108 source = opts['source']
3109 source = opts['source']
3109 clearcaches = opts['clear_caches']
3110 clearcaches = opts['clear_caches']
3110 validsource = (
3111 validsource = (
3111 b'full',
3112 b'full',
3112 b'parent-1',
3113 b'parent-1',
3113 b'parent-2',
3114 b'parent-2',
3114 b'parent-smallest',
3115 b'parent-smallest',
3115 b'storage',
3116 b'storage',
3116 )
3117 )
3117 if source not in validsource:
3118 if source not in validsource:
3118 raise error.Abort('invalid source type: %s' % source)
3119 raise error.Abort('invalid source type: %s' % source)
3119
3120
3120 ### actually gather results
3121 ### actually gather results
3121 count = opts['count']
3122 count = opts['count']
3122 if count <= 0:
3123 if count <= 0:
3123 raise error.Abort('invalid run count: %d' % count)
3124 raise error.Abort('invalid run count: %d' % count)
3124 allresults = []
3125 allresults = []
3125 for c in range(count):
3126 for c in range(count):
3126 timing = _timeonewrite(
3127 timing = _timeonewrite(
3127 ui,
3128 ui,
3128 rl,
3129 rl,
3129 source,
3130 source,
3130 startrev,
3131 startrev,
3131 stoprev,
3132 stoprev,
3132 c + 1,
3133 c + 1,
3133 lazydeltabase=lazydeltabase,
3134 lazydeltabase=lazydeltabase,
3134 clearcaches=clearcaches,
3135 clearcaches=clearcaches,
3135 )
3136 )
3136 allresults.append(timing)
3137 allresults.append(timing)
3137
3138
3138 ### consolidate the results in a single list
3139 ### consolidate the results in a single list
3139 results = []
3140 results = []
3140 for idx, (rev, t) in enumerate(allresults[0]):
3141 for idx, (rev, t) in enumerate(allresults[0]):
3141 ts = [t]
3142 ts = [t]
3142 for other in allresults[1:]:
3143 for other in allresults[1:]:
3143 orev, ot = other[idx]
3144 orev, ot = other[idx]
3144 assert orev == rev
3145 assert orev == rev
3145 ts.append(ot)
3146 ts.append(ot)
3146 results.append((rev, ts))
3147 results.append((rev, ts))
3147 resultcount = len(results)
3148 resultcount = len(results)
3148
3149
3149 ### Compute and display relevant statistics
3150 ### Compute and display relevant statistics
3150
3151
3151 # get a formatter
3152 # get a formatter
3152 fm = ui.formatter(b'perf', opts)
3153 fm = ui.formatter(b'perf', opts)
3153 displayall = ui.configbool(b"perf", b"all-timing", False)
3154 displayall = ui.configbool(b"perf", b"all-timing", False)
3154
3155
3155 # print individual details if requested
3156 # print individual details if requested
3156 if opts['details']:
3157 if opts['details']:
3157 for idx, item in enumerate(results, 1):
3158 for idx, item in enumerate(results, 1):
3158 rev, data = item
3159 rev, data = item
3159 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3160 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3160 formatone(fm, data, title=title, displayall=displayall)
3161 formatone(fm, data, title=title, displayall=displayall)
3161
3162
3162 # sorts results by median time
3163 # sorts results by median time
3163 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3164 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3164 # list of (name, index) to display
3165 # list of (name, index) to display
3165 relevants = [
3166 relevants = [
3166 ("min", 0),
3167 ("min", 0),
3167 ("10%", resultcount * 10 // 100),
3168 ("10%", resultcount * 10 // 100),
3168 ("25%", resultcount * 25 // 100),
3169 ("25%", resultcount * 25 // 100),
3169 ("50%", resultcount * 70 // 100),
3170 ("50%", resultcount * 70 // 100),
3170 ("75%", resultcount * 75 // 100),
3171 ("75%", resultcount * 75 // 100),
3171 ("90%", resultcount * 90 // 100),
3172 ("90%", resultcount * 90 // 100),
3172 ("95%", resultcount * 95 // 100),
3173 ("95%", resultcount * 95 // 100),
3173 ("99%", resultcount * 99 // 100),
3174 ("99%", resultcount * 99 // 100),
3174 ("99.9%", resultcount * 999 // 1000),
3175 ("99.9%", resultcount * 999 // 1000),
3175 ("99.99%", resultcount * 9999 // 10000),
3176 ("99.99%", resultcount * 9999 // 10000),
3176 ("99.999%", resultcount * 99999 // 100000),
3177 ("99.999%", resultcount * 99999 // 100000),
3177 ("max", -1),
3178 ("max", -1),
3178 ]
3179 ]
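# Each entry maps a percentile label to an index into the median-sorted
# `results` list: with 1000 measured revisions, "90%" picks the timing at
# index 900 and "max" (-1) picks the slowest one.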
3179 if not ui.quiet:
3180 if not ui.quiet:
3180 for name, idx in relevants:
3181 for name, idx in relevants:
3181 data = results[idx]
3182 data = results[idx]
3182 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3183 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3183 formatone(fm, data[1], title=title, displayall=displayall)
3184 formatone(fm, data[1], title=title, displayall=displayall)
3184
3185
3185 # XXX summing that many floats will not be very precise; we ignore this fact
3186 # XXX summing that many floats will not be very precise; we ignore this fact
3186 # for now
3187 # for now
3187 totaltime = []
3188 totaltime = []
3188 for item in allresults:
3189 for item in allresults:
3189 totaltime.append(
3190 totaltime.append(
3190 (
3191 (
3191 sum(x[1][0] for x in item),
3192 sum(x[1][0] for x in item),
3192 sum(x[1][1] for x in item),
3193 sum(x[1][1] for x in item),
3193 sum(x[1][2] for x in item),
3194 sum(x[1][2] for x in item),
3194 )
3195 )
3195 )
3196 )
3196 formatone(
3197 formatone(
3197 fm,
3198 fm,
3198 totaltime,
3199 totaltime,
3199 title="total time (%d revs)" % resultcount,
3200 title="total time (%d revs)" % resultcount,
3200 displayall=displayall,
3201 displayall=displayall,
3201 )
3202 )
3202 fm.end()
3203 fm.end()
3203
3204
3204
3205
3205 class _faketr:
3206 class _faketr:
3206 def add(s, x, y, z=None):
3207 def add(s, x, y, z=None):
3207 return None
3208 return None
3208
3209
3209
3210
3210 def _timeonewrite(
3211 def _timeonewrite(
3211 ui,
3212 ui,
3212 orig,
3213 orig,
3213 source,
3214 source,
3214 startrev,
3215 startrev,
3215 stoprev,
3216 stoprev,
3216 runidx=None,
3217 runidx=None,
3217 lazydeltabase=True,
3218 lazydeltabase=True,
3218 clearcaches=True,
3219 clearcaches=True,
3219 ):
3220 ):
3220 timings = []
3221 timings = []
3221 tr = _faketr()
3222 tr = _faketr()
3222 with _temprevlog(ui, orig, startrev) as dest:
3223 with _temprevlog(ui, orig, startrev) as dest:
3223 dest._lazydeltabase = lazydeltabase
3224 dest._lazydeltabase = lazydeltabase
3224 revs = list(orig.revs(startrev, stoprev))
3225 revs = list(orig.revs(startrev, stoprev))
3225 total = len(revs)
3226 total = len(revs)
3226 topic = 'adding'
3227 topic = 'adding'
3227 if runidx is not None:
3228 if runidx is not None:
3228 topic += ' (run #%d)' % runidx
3229 topic += ' (run #%d)' % runidx
3229 # Support both old and new progress API
3230 # Support both old and new progress API
3230 if util.safehasattr(ui, 'makeprogress'):
3231 if util.safehasattr(ui, 'makeprogress'):
3231 progress = ui.makeprogress(topic, unit='revs', total=total)
3232 progress = ui.makeprogress(topic, unit='revs', total=total)
3232
3233
3233 def updateprogress(pos):
3234 def updateprogress(pos):
3234 progress.update(pos)
3235 progress.update(pos)
3235
3236
3236 def completeprogress():
3237 def completeprogress():
3237 progress.complete()
3238 progress.complete()
3238
3239
3239 else:
3240 else:
3240
3241
3241 def updateprogress(pos):
3242 def updateprogress(pos):
3242 ui.progress(topic, pos, unit='revs', total=total)
3243 ui.progress(topic, pos, unit='revs', total=total)
3243
3244
3244 def completeprogress():
3245 def completeprogress():
3245 ui.progress(topic, None, unit='revs', total=total)
3246 ui.progress(topic, None, unit='revs', total=total)
3246
3247
3247 for idx, rev in enumerate(revs):
3248 for idx, rev in enumerate(revs):
3248 updateprogress(idx)
3249 updateprogress(idx)
3249 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3250 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3250 if clearcaches:
3251 if clearcaches:
3251 dest.index.clearcaches()
3252 dest.index.clearcaches()
3252 dest.clearcaches()
3253 dest.clearcaches()
3253 with timeone() as r:
3254 with timeone() as r:
3254 dest.addrawrevision(*addargs, **addkwargs)
3255 dest.addrawrevision(*addargs, **addkwargs)
3255 timings.append((rev, r[0]))
3256 timings.append((rev, r[0]))
3256 updateprogress(total)
3257 updateprogress(total)
3257 completeprogress()
3258 completeprogress()
3258 return timings
3259 return timings
3259
3260
3260
3261
3261 def _getrevisionseed(orig, rev, tr, source):
3262 def _getrevisionseed(orig, rev, tr, source):
3262 from mercurial.node import nullid
3263 from mercurial.node import nullid
3263
3264
3264 linkrev = orig.linkrev(rev)
3265 linkrev = orig.linkrev(rev)
3265 node = orig.node(rev)
3266 node = orig.node(rev)
3266 p1, p2 = orig.parents(node)
3267 p1, p2 = orig.parents(node)
3267 flags = orig.flags(rev)
3268 flags = orig.flags(rev)
3268 cachedelta = None
3269 cachedelta = None
3269 text = None
3270 text = None
3270
3271
3271 if source == b'full':
3272 if source == b'full':
3272 text = orig.revision(rev)
3273 text = orig.revision(rev)
3273 elif source == b'parent-1':
3274 elif source == b'parent-1':
3274 baserev = orig.rev(p1)
3275 baserev = orig.rev(p1)
3275 cachedelta = (baserev, orig.revdiff(p1, rev))
3276 cachedelta = (baserev, orig.revdiff(p1, rev))
3276 elif source == b'parent-2':
3277 elif source == b'parent-2':
3277 parent = p2
3278 parent = p2
3278 if p2 == nullid:
3279 if p2 == nullid:
3279 parent = p1
3280 parent = p1
3280 baserev = orig.rev(parent)
3281 baserev = orig.rev(parent)
3281 cachedelta = (baserev, orig.revdiff(parent, rev))
3282 cachedelta = (baserev, orig.revdiff(parent, rev))
3282 elif source == b'parent-smallest':
3283 elif source == b'parent-smallest':
3283 p1diff = orig.revdiff(p1, rev)
3284 p1diff = orig.revdiff(p1, rev)
3284 parent = p1
3285 parent = p1
3285 diff = p1diff
3286 diff = p1diff
3286 if p2 != nullid:
3287 if p2 != nullid:
3287 p2diff = orig.revdiff(p2, rev)
3288 p2diff = orig.revdiff(p2, rev)
3288 if len(p1diff) > len(p2diff):
3289 if len(p1diff) > len(p2diff):
3289 parent = p2
3290 parent = p2
3290 diff = p2diff
3291 diff = p2diff
3291 baserev = orig.rev(parent)
3292 baserev = orig.rev(parent)
3292 cachedelta = (baserev, diff)
3293 cachedelta = (baserev, diff)
3293 elif source == b'storage':
3294 elif source == b'storage':
3294 baserev = orig.deltaparent(rev)
3295 baserev = orig.deltaparent(rev)
3295 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3296 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3296
3297
3297 return (
3298 return (
3298 (text, tr, linkrev, p1, p2),
3299 (text, tr, linkrev, p1, p2),
3299 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3300 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3300 )
3301 )
3301
3302
3302
3303
3303 @contextlib.contextmanager
3304 @contextlib.contextmanager
3304 def _temprevlog(ui, orig, truncaterev):
3305 def _temprevlog(ui, orig, truncaterev):
3305 from mercurial import vfs as vfsmod
3306 from mercurial import vfs as vfsmod
3306
3307
3307 if orig._inline:
3308 if orig._inline:
3308 raise error.Abort('not supporting inline revlog (yet)')
3309 raise error.Abort('not supporting inline revlog (yet)')
3309 revlogkwargs = {}
3310 revlogkwargs = {}
3310 k = 'upperboundcomp'
3311 k = 'upperboundcomp'
3311 if util.safehasattr(orig, k):
3312 if util.safehasattr(orig, k):
3312 revlogkwargs[k] = getattr(orig, k)
3313 revlogkwargs[k] = getattr(orig, k)
3313
3314
3314 indexfile = getattr(orig, '_indexfile', None)
3315 indexfile = getattr(orig, '_indexfile', None)
3315 if indexfile is None:
3316 if indexfile is None:
3316 # compatibility with <= hg-5.8
3317 # compatibility with <= hg-5.8
3317 indexfile = getattr(orig, 'indexfile')
3318 indexfile = getattr(orig, 'indexfile')
3318 origindexpath = orig.opener.join(indexfile)
3319 origindexpath = orig.opener.join(indexfile)
3319
3320
3320 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3321 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3321 origdatapath = orig.opener.join(datafile)
3322 origdatapath = orig.opener.join(datafile)
3322 radix = b'revlog'
3323 radix = b'revlog'
3323 indexname = b'revlog.i'
3324 indexname = b'revlog.i'
3324 dataname = b'revlog.d'
3325 dataname = b'revlog.d'
3325
3326
3326 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3327 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3327 try:
3328 try:
3328 # copy the revlog files into a temporary directory
3329 # copy the revlog files into a temporary directory
3329 ui.debug('copying data in %s\n' % tmpdir)
3330 ui.debug('copying data in %s\n' % tmpdir)
3330 destindexpath = os.path.join(tmpdir, 'revlog.i')
3331 destindexpath = os.path.join(tmpdir, 'revlog.i')
3331 destdatapath = os.path.join(tmpdir, 'revlog.d')
3332 destdatapath = os.path.join(tmpdir, 'revlog.d')
3332 shutil.copyfile(origindexpath, destindexpath)
3333 shutil.copyfile(origindexpath, destindexpath)
3333 shutil.copyfile(origdatapath, destdatapath)
3334 shutil.copyfile(origdatapath, destdatapath)
3334
3335
3335 # remove the data we want to add again
3336 # remove the data we want to add again
3336 ui.debug('truncating data to be rewritten\n')
3337 ui.debug('truncating data to be rewritten\n')
3337 with open(destindexpath, 'ab') as index:
3338 with open(destindexpath, 'ab') as index:
3338 index.seek(0)
3339 index.seek(0)
3339 index.truncate(truncaterev * orig._io.size)
3340 index.truncate(truncaterev * orig._io.size)
3340 with open(destdatapath, 'ab') as data:
3341 with open(destdatapath, 'ab') as data:
3341 data.seek(0)
3342 data.seek(0)
3342 data.truncate(orig.start(truncaterev))
3343 data.truncate(orig.start(truncaterev))
3343
3344
3344 # instantiate a new revlog from the temporary copy
3345 # instantiate a new revlog from the temporary copy
3345 ui.debug('instantiating revlog from the truncated copy\n')
3346 ui.debug('instantiating revlog from the truncated copy\n')
3346 vfs = vfsmod.vfs(tmpdir)
3347 vfs = vfsmod.vfs(tmpdir)
3347 vfs.options = getattr(orig.opener, 'options', None)
3348 vfs.options = getattr(orig.opener, 'options', None)
3348
3349
3349 try:
3350 try:
3350 dest = revlog(vfs, radix=radix, **revlogkwargs)
3351 dest = revlog(vfs, radix=radix, **revlogkwargs)
3351 except TypeError:
3352 except TypeError:
3352 dest = revlog(
3353 dest = revlog(
3353 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3354 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3354 )
3355 )
3355 if dest._inline:
3356 if dest._inline:
3356 raise error.Abort('not supporting inline revlog (yet)')
3357 raise error.Abort('not supporting inline revlog (yet)')
3357 # make sure internals are initialized
3358 # make sure internals are initialized
3358 dest.revision(len(dest) - 1)
3359 dest.revision(len(dest) - 1)
3359 yield dest
3360 yield dest
3360 del dest, vfs
3361 del dest, vfs
3361 finally:
3362 finally:
3362 shutil.rmtree(tmpdir, True)
3363 shutil.rmtree(tmpdir, True)
3363
3364
3364
3365
3365 @command(
3366 @command(
3366 b'perf::revlogchunks|perfrevlogchunks',
3367 b'perf::revlogchunks|perfrevlogchunks',
3367 revlogopts
3368 revlogopts
3368 + formatteropts
3369 + formatteropts
3369 + [
3370 + [
3370 (b'e', b'engines', b'', b'compression engines to use'),
3371 (b'e', b'engines', b'', b'compression engines to use'),
3371 (b's', b'startrev', 0, b'revision to start at'),
3372 (b's', b'startrev', 0, b'revision to start at'),
3372 ],
3373 ],
3373 b'-c|-m|FILE',
3374 b'-c|-m|FILE',
3374 )
3375 )
3375 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3376 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3376 """Benchmark operations on revlog chunks.
3377 """Benchmark operations on revlog chunks.
3377
3378
3378 Logically, each revlog is a collection of fulltext revisions. However,
3379 Logically, each revlog is a collection of fulltext revisions. However,
3379 stored within each revlog are "chunks" of possibly compressed data. This
3380 stored within each revlog are "chunks" of possibly compressed data. This
3380 data needs to be read and decompressed or compressed and written.
3381 data needs to be read and decompressed or compressed and written.
3381
3382
3382 This command measures the time it takes to read+decompress and recompress
3383 This command measures the time it takes to read+decompress and recompress
3383 chunks in a revlog. It effectively isolates I/O and compression performance.
3384 chunks in a revlog. It effectively isolates I/O and compression performance.
3384 For measurements of higher-level operations like resolving revisions,
3385 For measurements of higher-level operations like resolving revisions,
3385 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3386 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3386 """
3387 """
3387 opts = _byteskwargs(opts)
3388 opts = _byteskwargs(opts)
3388
3389
3389 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3390 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3390
3391
3391 # _chunkraw was renamed to _getsegmentforrevs.
3392 # _chunkraw was renamed to _getsegmentforrevs.
3392 try:
3393 try:
3393 segmentforrevs = rl._getsegmentforrevs
3394 segmentforrevs = rl._getsegmentforrevs
3394 except AttributeError:
3395 except AttributeError:
3395 segmentforrevs = rl._chunkraw
3396 segmentforrevs = rl._chunkraw
3396
3397
3397 # Verify engines argument.
3398 # Verify engines argument.
3398 if engines:
3399 if engines:
3399 engines = {e.strip() for e in engines.split(b',')}
3400 engines = {e.strip() for e in engines.split(b',')}
3400 for engine in engines:
3401 for engine in engines:
3401 try:
3402 try:
3402 util.compressionengines[engine]
3403 util.compressionengines[engine]
3403 except KeyError:
3404 except KeyError:
3404 raise error.Abort(b'unknown compression engine: %s' % engine)
3405 raise error.Abort(b'unknown compression engine: %s' % engine)
3405 else:
3406 else:
3406 engines = []
3407 engines = []
3407 for e in util.compengines:
3408 for e in util.compengines:
3408 engine = util.compengines[e]
3409 engine = util.compengines[e]
3409 try:
3410 try:
3410 if engine.available():
3411 if engine.available():
3411 engine.revlogcompressor().compress(b'dummy')
3412 engine.revlogcompressor().compress(b'dummy')
3412 engines.append(e)
3413 engines.append(e)
3413 except NotImplementedError:
3414 except NotImplementedError:
3414 pass
3415 pass
3415
3416
3416 revs = list(rl.revs(startrev, len(rl) - 1))
3417 revs = list(rl.revs(startrev, len(rl) - 1))
3417
3418
3418 def rlfh(rl):
3419 def rlfh(rl):
3419 if rl._inline:
3420 if rl._inline:
3420 indexfile = getattr(rl, '_indexfile', None)
3421 indexfile = getattr(rl, '_indexfile', None)
3421 if indexfile is None:
3422 if indexfile is None:
3422 # compatibility with <= hg-5.8
3423 # compatibility with <= hg-5.8
3423 indexfile = getattr(rl, 'indexfile')
3424 indexfile = getattr(rl, 'indexfile')
3424 return getsvfs(repo)(indexfile)
3425 return getsvfs(repo)(indexfile)
3425 else:
3426 else:
3426 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3427 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3427 return getsvfs(repo)(datafile)
3428 return getsvfs(repo)(datafile)
3428
3429
3429 def doread():
3430 def doread():
3430 rl.clearcaches()
3431 rl.clearcaches()
3431 for rev in revs:
3432 for rev in revs:
3432 segmentforrevs(rev, rev)
3433 segmentforrevs(rev, rev)
3433
3434
3434 def doreadcachedfh():
3435 def doreadcachedfh():
3435 rl.clearcaches()
3436 rl.clearcaches()
3436 fh = rlfh(rl)
3437 fh = rlfh(rl)
3437 for rev in revs:
3438 for rev in revs:
3438 segmentforrevs(rev, rev, df=fh)
3439 segmentforrevs(rev, rev, df=fh)
3439
3440
3440 def doreadbatch():
3441 def doreadbatch():
3441 rl.clearcaches()
3442 rl.clearcaches()
3442 segmentforrevs(revs[0], revs[-1])
3443 segmentforrevs(revs[0], revs[-1])
3443
3444
3444 def doreadbatchcachedfh():
3445 def doreadbatchcachedfh():
3445 rl.clearcaches()
3446 rl.clearcaches()
3446 fh = rlfh(rl)
3447 fh = rlfh(rl)
3447 segmentforrevs(revs[0], revs[-1], df=fh)
3448 segmentforrevs(revs[0], revs[-1], df=fh)
3448
3449
3449 def dochunk():
3450 def dochunk():
3450 rl.clearcaches()
3451 rl.clearcaches()
3451 fh = rlfh(rl)
3452 fh = rlfh(rl)
3452 for rev in revs:
3453 for rev in revs:
3453 rl._chunk(rev, df=fh)
3454 rl._chunk(rev, df=fh)
3454
3455
3455 chunks = [None]
3456 chunks = [None]
3456
3457
3457 def dochunkbatch():
3458 def dochunkbatch():
3458 rl.clearcaches()
3459 rl.clearcaches()
3459 fh = rlfh(rl)
3460 fh = rlfh(rl)
3460 # Save chunks as a side-effect.
3461 # Save chunks as a side-effect.
3461 chunks[0] = rl._chunks(revs, df=fh)
3462 chunks[0] = rl._chunks(revs, df=fh)
3462
3463
3463 def docompress(compressor):
3464 def docompress(compressor):
3464 rl.clearcaches()
3465 rl.clearcaches()
3465
3466
3466 try:
3467 try:
3467 # Swap in the requested compression engine.
3468 # Swap in the requested compression engine.
3468 oldcompressor = rl._compressor
3469 oldcompressor = rl._compressor
3469 rl._compressor = compressor
3470 rl._compressor = compressor
3470 for chunk in chunks[0]:
3471 for chunk in chunks[0]:
3471 rl.compress(chunk)
3472 rl.compress(chunk)
3472 finally:
3473 finally:
3473 rl._compressor = oldcompressor
3474 rl._compressor = oldcompressor
3474
3475
3475 benches = [
3476 benches = [
3476 (lambda: doread(), b'read'),
3477 (lambda: doread(), b'read'),
3477 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3478 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3478 (lambda: doreadbatch(), b'read batch'),
3479 (lambda: doreadbatch(), b'read batch'),
3479 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3480 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3480 (lambda: dochunk(), b'chunk'),
3481 (lambda: dochunk(), b'chunk'),
3481 (lambda: dochunkbatch(), b'chunk batch'),
3482 (lambda: dochunkbatch(), b'chunk batch'),
3482 ]
3483 ]
3483
3484
3484 for engine in sorted(engines):
3485 for engine in sorted(engines):
3485 compressor = util.compengines[engine].revlogcompressor()
3486 compressor = util.compengines[engine].revlogcompressor()
3486 benches.append(
3487 benches.append(
3487 (
3488 (
3488 functools.partial(docompress, compressor),
3489 functools.partial(docompress, compressor),
3489 b'compress w/ %s' % engine,
3490 b'compress w/ %s' % engine,
3490 )
3491 )
3491 )
3492 )
3492
3493
3493 for fn, title in benches:
3494 for fn, title in benches:
3494 timer, fm = gettimer(ui, opts)
3495 timer, fm = gettimer(ui, opts)
3495 timer(fn, title=title)
3496 timer(fn, title=title)
3496 fm.end()
3497 fm.end()
3497
3498
3498
3499
3499 @command(
3500 @command(
3500 b'perf::revlogrevision|perfrevlogrevision',
3501 b'perf::revlogrevision|perfrevlogrevision',
3501 revlogopts
3502 revlogopts
3502 + formatteropts
3503 + formatteropts
3503 + [(b'', b'cache', False, b'use caches instead of clearing')],
3504 + [(b'', b'cache', False, b'use caches instead of clearing')],
3504 b'-c|-m|FILE REV',
3505 b'-c|-m|FILE REV',
3505 )
3506 )
3506 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3507 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3507 """Benchmark obtaining a revlog revision.
3508 """Benchmark obtaining a revlog revision.
3508
3509
3509 Obtaining a revlog revision consists of roughly the following steps:
3510 Obtaining a revlog revision consists of roughly the following steps:
3510
3511
3511 1. Compute the delta chain
3512 1. Compute the delta chain
3512 2. Slice the delta chain if applicable
3513 2. Slice the delta chain if applicable
3513 3. Obtain the raw chunks for that delta chain
3514 3. Obtain the raw chunks for that delta chain
3514 4. Decompress each raw chunk
3515 4. Decompress each raw chunk
3515 5. Apply binary patches to obtain fulltext
3516 5. Apply binary patches to obtain fulltext
3516 6. Verify hash of fulltext
3517 6. Verify hash of fulltext
3517
3518
3518 This command measures the time spent in each of these phases.
3519 This command measures the time spent in each of these phases.
3519 """
3520 """
3520 opts = _byteskwargs(opts)
3521 opts = _byteskwargs(opts)
3521
3522
3522 if opts.get(b'changelog') or opts.get(b'manifest'):
3523 if opts.get(b'changelog') or opts.get(b'manifest'):
3523 file_, rev = None, file_
3524 file_, rev = None, file_
3524 elif rev is None:
3525 elif rev is None:
3525 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3526 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3526
3527
3527 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3528 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3528
3529
3529 # _chunkraw was renamed to _getsegmentforrevs.
3530 # _chunkraw was renamed to _getsegmentforrevs.
3530 try:
3531 try:
3531 segmentforrevs = r._getsegmentforrevs
3532 segmentforrevs = r._getsegmentforrevs
3532 except AttributeError:
3533 except AttributeError:
3533 segmentforrevs = r._chunkraw
3534 segmentforrevs = r._chunkraw
3534
3535
3535 node = r.lookup(rev)
3536 node = r.lookup(rev)
3536 rev = r.rev(node)
3537 rev = r.rev(node)
3537
3538
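# Slice the raw segment data back into one compressed chunk per revision of
# the delta chain, mirroring what the revlog does internally when resolving
# a revision.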
3538 def getrawchunks(data, chain):
3539 def getrawchunks(data, chain):
3539 start = r.start
3540 start = r.start
3540 length = r.length
3541 length = r.length
3541 inline = r._inline
3542 inline = r._inline
3542 try:
3543 try:
3543 iosize = r.index.entry_size
3544 iosize = r.index.entry_size
3544 except AttributeError:
3545 except AttributeError:
3545 iosize = r._io.size
3546 iosize = r._io.size
3546 buffer = util.buffer
3547 buffer = util.buffer
3547
3548
3548 chunks = []
3549 chunks = []
3549 ladd = chunks.append
3550 ladd = chunks.append
3550 for idx, item in enumerate(chain):
3551 for idx, item in enumerate(chain):
3551 offset = start(item[0])
3552 offset = start(item[0])
3552 bits = data[idx]
3553 bits = data[idx]
3553 for rev in item:
3554 for rev in item:
3554 chunkstart = start(rev)
3555 chunkstart = start(rev)
3555 if inline:
3556 if inline:
3556 chunkstart += (rev + 1) * iosize
3557 chunkstart += (rev + 1) * iosize
3557 chunklength = length(rev)
3558 chunklength = length(rev)
3558 ladd(buffer(bits, chunkstart - offset, chunklength))
3559 ladd(buffer(bits, chunkstart - offset, chunklength))
3559
3560
3560 return chunks
3561 return chunks
3561
3562
3562 def dodeltachain(rev):
3563 def dodeltachain(rev):
3563 if not cache:
3564 if not cache:
3564 r.clearcaches()
3565 r.clearcaches()
3565 r._deltachain(rev)
3566 r._deltachain(rev)
3566
3567
3567 def doread(chain):
3568 def doread(chain):
3568 if not cache:
3569 if not cache:
3569 r.clearcaches()
3570 r.clearcaches()
3570 for item in slicedchain:
3571 for item in slicedchain:
3571 segmentforrevs(item[0], item[-1])
3572 segmentforrevs(item[0], item[-1])
3572
3573
3573 def doslice(r, chain, size):
3574 def doslice(r, chain, size):
3574 for s in slicechunk(r, chain, targetsize=size):
3575 for s in slicechunk(r, chain, targetsize=size):
3575 pass
3576 pass
3576
3577
3577 def dorawchunks(data, chain):
3578 def dorawchunks(data, chain):
3578 if not cache:
3579 if not cache:
3579 r.clearcaches()
3580 r.clearcaches()
3580 getrawchunks(data, chain)
3581 getrawchunks(data, chain)
3581
3582
3582 def dodecompress(chunks):
3583 def dodecompress(chunks):
3583 decomp = r.decompress
3584 decomp = r.decompress
3584 for chunk in chunks:
3585 for chunk in chunks:
3585 decomp(chunk)
3586 decomp(chunk)
3586
3587
3587 def dopatch(text, bins):
3588 def dopatch(text, bins):
3588 if not cache:
3589 if not cache:
3589 r.clearcaches()
3590 r.clearcaches()
3590 mdiff.patches(text, bins)
3591 mdiff.patches(text, bins)
3591
3592
3592 def dohash(text):
3593 def dohash(text):
3593 if not cache:
3594 if not cache:
3594 r.clearcaches()
3595 r.clearcaches()
3595 r.checkhash(text, node, rev=rev)
3596 r.checkhash(text, node, rev=rev)
3596
3597
3597 def dorevision():
3598 def dorevision():
3598 if not cache:
3599 if not cache:
3599 r.clearcaches()
3600 r.clearcaches()
3600 r.revision(node)
3601 r.revision(node)
3601
3602
3602 try:
3603 try:
3603 from mercurial.revlogutils.deltas import slicechunk
3604 from mercurial.revlogutils.deltas import slicechunk
3604 except ImportError:
3605 except ImportError:
3605 slicechunk = getattr(revlog, '_slicechunk', None)
3606 slicechunk = getattr(revlog, '_slicechunk', None)
3606
3607
3607 size = r.length(rev)
3608 size = r.length(rev)
3608 chain = r._deltachain(rev)[0]
3609 chain = r._deltachain(rev)[0]
3609 if not getattr(r, '_withsparseread', False):
3610 if not getattr(r, '_withsparseread', False):
3610 slicedchain = (chain,)
3611 slicedchain = (chain,)
3611 else:
3612 else:
3612 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3613 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3613 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3614 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3614 rawchunks = getrawchunks(data, slicedchain)
3615 rawchunks = getrawchunks(data, slicedchain)
3615 bins = r._chunks(chain)
3616 bins = r._chunks(chain)
3616 text = bytes(bins[0])
3617 text = bytes(bins[0])
3617 bins = bins[1:]
3618 bins = bins[1:]
3618 text = mdiff.patches(text, bins)
3619 text = mdiff.patches(text, bins)
3619
3620
3620 benches = [
3621 benches = [
3621 (lambda: dorevision(), b'full'),
3622 (lambda: dorevision(), b'full'),
3622 (lambda: dodeltachain(rev), b'deltachain'),
3623 (lambda: dodeltachain(rev), b'deltachain'),
3623 (lambda: doread(chain), b'read'),
3624 (lambda: doread(chain), b'read'),
3624 ]
3625 ]
3625
3626
3626 if getattr(r, '_withsparseread', False):
3627 if getattr(r, '_withsparseread', False):
3627 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3628 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3628 benches.append(slicing)
3629 benches.append(slicing)
3629
3630
3630 benches.extend(
3631 benches.extend(
3631 [
3632 [
3632 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3633 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3633 (lambda: dodecompress(rawchunks), b'decompress'),
3634 (lambda: dodecompress(rawchunks), b'decompress'),
3634 (lambda: dopatch(text, bins), b'patch'),
3635 (lambda: dopatch(text, bins), b'patch'),
3635 (lambda: dohash(text), b'hash'),
3636 (lambda: dohash(text), b'hash'),
3636 ]
3637 ]
3637 )
3638 )
3638
3639
3639 timer, fm = gettimer(ui, opts)
3640 timer, fm = gettimer(ui, opts)
3640 for fn, title in benches:
3641 for fn, title in benches:
3641 timer(fn, title=title)
3642 timer(fn, title=title)
3642 fm.end()
3643 fm.end()
3643
3644
3644
3645
3645 @command(
3646 @command(
3646 b'perf::revset|perfrevset',
3647 b'perf::revset|perfrevset',
3647 [
3648 [
3648 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3649 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3649 (b'', b'contexts', False, b'obtain changectx for each revision'),
3650 (b'', b'contexts', False, b'obtain changectx for each revision'),
3650 ]
3651 ]
3651 + formatteropts,
3652 + formatteropts,
3652 b"REVSET",
3653 b"REVSET",
3653 )
3654 )
3654 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3655 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3655 """benchmark the execution time of a revset
3656 """benchmark the execution time of a revset
3656
3657
3657 Use the --clear option if you need to evaluate the impact of building the
3658 Use the --clear option if you need to evaluate the impact of building the
3658 volatile revision set caches on revset execution. The volatile caches hold
3659 volatile revision set caches on revset execution. The volatile caches hold
3659 filtering- and obsolescence-related data."""
3660 filtering- and obsolescence-related data."""
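# Illustrative invocation (the options are the ones defined above), e.g.:
#   $ hg perfrevset --contexts 'all()'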
3660 opts = _byteskwargs(opts)
3661 opts = _byteskwargs(opts)
3661
3662
3662 timer, fm = gettimer(ui, opts)
3663 timer, fm = gettimer(ui, opts)
3663
3664
3664 def d():
3665 def d():
3665 if clear:
3666 if clear:
3666 repo.invalidatevolatilesets()
3667 repo.invalidatevolatilesets()
3667 if contexts:
3668 if contexts:
3668 for ctx in repo.set(expr):
3669 for ctx in repo.set(expr):
3669 pass
3670 pass
3670 else:
3671 else:
3671 for r in repo.revs(expr):
3672 for r in repo.revs(expr):
3672 pass
3673 pass
3673
3674
3674 timer(d)
3675 timer(d)
3675 fm.end()
3676 fm.end()
3676
3677
3677
3678
3678 @command(
3679 @command(
3679 b'perf::volatilesets|perfvolatilesets',
3680 b'perf::volatilesets|perfvolatilesets',
3680 [
3681 [
3681 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3682 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3682 ]
3683 ]
3683 + formatteropts,
3684 + formatteropts,
3684 )
3685 )
3685 def perfvolatilesets(ui, repo, *names, **opts):
3686 def perfvolatilesets(ui, repo, *names, **opts):
3686 """benchmark the computation of various volatile set
3687 """benchmark the computation of various volatile set
3687
3688
3688 Volatile sets compute elements related to filtering and obsolescence."""
3689 Volatile sets compute elements related to filtering and obsolescence."""
3689 opts = _byteskwargs(opts)
3690 opts = _byteskwargs(opts)
3690 timer, fm = gettimer(ui, opts)
3691 timer, fm = gettimer(ui, opts)
3691 repo = repo.unfiltered()
3692 repo = repo.unfiltered()
3692
3693
3693 def getobs(name):
3694 def getobs(name):
3694 def d():
3695 def d():
3695 repo.invalidatevolatilesets()
3696 repo.invalidatevolatilesets()
3696 if opts[b'clear_obsstore']:
3697 if opts[b'clear_obsstore']:
3697 clearfilecache(repo, b'obsstore')
3698 clearfilecache(repo, b'obsstore')
3698 obsolete.getrevs(repo, name)
3699 obsolete.getrevs(repo, name)
3699
3700
3700 return d
3701 return d
3701
3702
3702 allobs = sorted(obsolete.cachefuncs)
3703 allobs = sorted(obsolete.cachefuncs)
3703 if names:
3704 if names:
3704 allobs = [n for n in allobs if n in names]
3705 allobs = [n for n in allobs if n in names]
3705
3706
3706 for name in allobs:
3707 for name in allobs:
3707 timer(getobs(name), title=name)
3708 timer(getobs(name), title=name)
3708
3709
3709 def getfiltered(name):
3710 def getfiltered(name):
3710 def d():
3711 def d():
3711 repo.invalidatevolatilesets()
3712 repo.invalidatevolatilesets()
3712 if opts[b'clear_obsstore']:
3713 if opts[b'clear_obsstore']:
3713 clearfilecache(repo, b'obsstore')
3714 clearfilecache(repo, b'obsstore')
3714 repoview.filterrevs(repo, name)
3715 repoview.filterrevs(repo, name)
3715
3716
3716 return d
3717 return d
3717
3718
3718 allfilter = sorted(repoview.filtertable)
3719 allfilter = sorted(repoview.filtertable)
3719 if names:
3720 if names:
3720 allfilter = [n for n in allfilter if n in names]
3721 allfilter = [n for n in allfilter if n in names]
3721
3722
3722 for name in allfilter:
3723 for name in allfilter:
3723 timer(getfiltered(name), title=name)
3724 timer(getfiltered(name), title=name)
3724 fm.end()
3725 fm.end()
3725
3726
3726
3727
3727 @command(
3728 @command(
3728 b'perf::branchmap|perfbranchmap',
3729 b'perf::branchmap|perfbranchmap',
3729 [
3730 [
3730 (b'f', b'full', False, b'Includes build time of subset'),
3731 (b'f', b'full', False, b'Includes build time of subset'),
3731 (
3732 (
3732 b'',
3733 b'',
3733 b'clear-revbranch',
3734 b'clear-revbranch',
3734 False,
3735 False,
3735 b'purge the revbranch cache between computation',
3736 b'purge the revbranch cache between computation',
3736 ),
3737 ),
3737 ]
3738 ]
3738 + formatteropts,
3739 + formatteropts,
3739 )
3740 )
3740 def perfbranchmap(ui, repo, *filternames, **opts):
3741 def perfbranchmap(ui, repo, *filternames, **opts):
3741 """benchmark the update of a branchmap
3742 """benchmark the update of a branchmap
3742
3743
3743 This benchmarks the full repo.branchmap() call with read and write disabled
3744 This benchmarks the full repo.branchmap() call with read and write disabled
3744 """
3745 """
3745 opts = _byteskwargs(opts)
3746 opts = _byteskwargs(opts)
3746 full = opts.get(b"full", False)
3747 full = opts.get(b"full", False)
3747 clear_revbranch = opts.get(b"clear_revbranch", False)
3748 clear_revbranch = opts.get(b"clear_revbranch", False)
3748 timer, fm = gettimer(ui, opts)
3749 timer, fm = gettimer(ui, opts)
3749
3750
3750 def getbranchmap(filtername):
3751 def getbranchmap(filtername):
3751 """generate a benchmark function for the filtername"""
3752 """generate a benchmark function for the filtername"""
3752 if filtername is None:
3753 if filtername is None:
3753 view = repo
3754 view = repo
3754 else:
3755 else:
3755 view = repo.filtered(filtername)
3756 view = repo.filtered(filtername)
3756 if util.safehasattr(view._branchcaches, '_per_filter'):
3757 if util.safehasattr(view._branchcaches, '_per_filter'):
3757 filtered = view._branchcaches._per_filter
3758 filtered = view._branchcaches._per_filter
3758 else:
3759 else:
3759 # older versions
3760 # older versions
3760 filtered = view._branchcaches
3761 filtered = view._branchcaches
3761
3762
3762 def d():
3763 def d():
3763 if clear_revbranch:
3764 if clear_revbranch:
3764 repo.revbranchcache()._clear()
3765 repo.revbranchcache()._clear()
3765 if full:
3766 if full:
3766 view._branchcaches.clear()
3767 view._branchcaches.clear()
3767 else:
3768 else:
3768 filtered.pop(filtername, None)
3769 filtered.pop(filtername, None)
3769 view.branchmap()
3770 view.branchmap()
3770
3771
3771 return d
3772 return d
3772
3773
3773 # order filters from smaller subsets to bigger subsets
3774 # order filters from smaller subsets to bigger subsets
3774 possiblefilters = set(repoview.filtertable)
3775 possiblefilters = set(repoview.filtertable)
3775 if filternames:
3776 if filternames:
3776 possiblefilters &= set(filternames)
3777 possiblefilters &= set(filternames)
3777 subsettable = getbranchmapsubsettable()
3778 subsettable = getbranchmapsubsettable()
3778 allfilters = []
3779 allfilters = []
3779 while possiblefilters:
3780 while possiblefilters:
3780 for name in possiblefilters:
3781 for name in possiblefilters:
3781 subset = subsettable.get(name)
3782 subset = subsettable.get(name)
3782 if subset not in possiblefilters:
3783 if subset not in possiblefilters:
3783 break
3784 break
3784 else:
3785 else:
3785 assert False, b'subset cycle %s!' % possiblefilters
3786 assert False, b'subset cycle %s!' % possiblefilters
3786 allfilters.append(name)
3787 allfilters.append(name)
3787 possiblefilters.remove(name)
3788 possiblefilters.remove(name)
3788
3789
3789 # warm the cache
3790 # warm the cache
3790 if not full:
3791 if not full:
3791 for name in allfilters:
3792 for name in allfilters:
3792 repo.filtered(name).branchmap()
3793 repo.filtered(name).branchmap()
3793 if not filternames or b'unfiltered' in filternames:
3794 if not filternames or b'unfiltered' in filternames:
3794 # add unfiltered
3795 # add unfiltered
3795 allfilters.append(None)
3796 allfilters.append(None)
3796
3797
3797 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3798 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3798 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3799 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3799 branchcacheread.set(classmethod(lambda *args: None))
3800 branchcacheread.set(classmethod(lambda *args: None))
3800 else:
3801 else:
3801 # older versions
3802 # older versions
3802 branchcacheread = safeattrsetter(branchmap, b'read')
3803 branchcacheread = safeattrsetter(branchmap, b'read')
3803 branchcacheread.set(lambda *args: None)
3804 branchcacheread.set(lambda *args: None)
3804 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3805 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3805 branchcachewrite.set(lambda *args: None)
3806 branchcachewrite.set(lambda *args: None)
3806 try:
3807 try:
3807 for name in allfilters:
3808 for name in allfilters:
3808 printname = name
3809 printname = name
3809 if name is None:
3810 if name is None:
3810 printname = b'unfiltered'
3811 printname = b'unfiltered'
3811 timer(getbranchmap(name), title=printname)
3812 timer(getbranchmap(name), title=printname)
3812 finally:
3813 finally:
3813 branchcacheread.restore()
3814 branchcacheread.restore()
3814 branchcachewrite.restore()
3815 branchcachewrite.restore()
3815 fm.end()
3816 fm.end()
3816
3817
3817
3818
3818 @command(
3819 @command(
3819 b'perf::branchmapupdate|perfbranchmapupdate',
3820 b'perf::branchmapupdate|perfbranchmapupdate',
3820 [
3821 [
3821 (b'', b'base', [], b'subset of revision to start from'),
3822 (b'', b'base', [], b'subset of revision to start from'),
3822 (b'', b'target', [], b'subset of revision to end with'),
3823 (b'', b'target', [], b'subset of revision to end with'),
3823 (b'', b'clear-caches', False, b'clear cache between each runs'),
3824 (b'', b'clear-caches', False, b'clear cache between each runs'),
3824 ]
3825 ]
3825 + formatteropts,
3826 + formatteropts,
3826 )
3827 )
3827 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3828 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3828 """benchmark branchmap update from for <base> revs to <target> revs
3829 """benchmark branchmap update from for <base> revs to <target> revs
3829
3830
3830 If `--clear-caches` is passed, the following items will be reset before
3831 If `--clear-caches` is passed, the following items will be reset before
3831 each update:
3832 each update:
3832 * the changelog instance and associated indexes
3833 * the changelog instance and associated indexes
3833 * the rev-branch-cache instance
3834 * the rev-branch-cache instance
3834
3835
3835 Examples:
3836 Examples:
3836
3837
3837 # update for the one last revision
3838 # update for the one last revision
3838 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3839 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3839
3840
3840 # update for changes coming with a new branch
3841 # update for changes coming with a new branch
3841 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3842 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3842 """
3843 """
3843 from mercurial import branchmap
3844 from mercurial import branchmap
3844 from mercurial import repoview
3845 from mercurial import repoview
3845
3846
3846 opts = _byteskwargs(opts)
3847 opts = _byteskwargs(opts)
3847 timer, fm = gettimer(ui, opts)
3848 timer, fm = gettimer(ui, opts)
3848 clearcaches = opts[b'clear_caches']
3849 clearcaches = opts[b'clear_caches']
3849 unfi = repo.unfiltered()
3850 unfi = repo.unfiltered()
3850 x = [None] # used to pass data between closures
3851 x = [None] # used to pass data between closures
3851
3852
3852 # we use a `list` here to avoid possible side effects from smartset
3853 # we use a `list` here to avoid possible side effects from smartset
3853 baserevs = list(scmutil.revrange(repo, base))
3854 baserevs = list(scmutil.revrange(repo, base))
3854 targetrevs = list(scmutil.revrange(repo, target))
3855 targetrevs = list(scmutil.revrange(repo, target))
3855 if not baserevs:
3856 if not baserevs:
3856 raise error.Abort(b'no revisions selected for --base')
3857 raise error.Abort(b'no revisions selected for --base')
3857 if not targetrevs:
3858 if not targetrevs:
3858 raise error.Abort(b'no revisions selected for --target')
3859 raise error.Abort(b'no revisions selected for --target')
3859
3860
3860 # make sure the target branchmap also contains the one in the base
3861 # make sure the target branchmap also contains the one in the base
3861 targetrevs = list(set(baserevs) | set(targetrevs))
3862 targetrevs = list(set(baserevs) | set(targetrevs))
3862 targetrevs.sort()
3863 targetrevs.sort()
3863
3864
3864 cl = repo.changelog
3865 cl = repo.changelog
3865 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3866 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3866 allbaserevs.sort()
3867 allbaserevs.sort()
3867 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3868 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3868
3869
3869 newrevs = list(alltargetrevs.difference(allbaserevs))
3870 newrevs = list(alltargetrevs.difference(allbaserevs))
3870 newrevs.sort()
3871 newrevs.sort()
3871
3872
3872 allrevs = frozenset(unfi.changelog.revs())
3873 allrevs = frozenset(unfi.changelog.revs())
3873 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3874 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3874 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3875 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3875
3876
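# Temporary repoview filters: hiding every revision outside the base/target
# ancestor sets gives us two repo views whose branchmap difference is exactly
# the update being benchmarked.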
3876 def basefilter(repo, visibilityexceptions=None):
3877 def basefilter(repo, visibilityexceptions=None):
3877 return basefilterrevs
3878 return basefilterrevs
3878
3879
3879 def targetfilter(repo, visibilityexceptions=None):
3880 def targetfilter(repo, visibilityexceptions=None):
3880 return targetfilterrevs
3881 return targetfilterrevs
3881
3882
3882 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3883 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3883 ui.status(msg % (len(allbaserevs), len(newrevs)))
3884 ui.status(msg % (len(allbaserevs), len(newrevs)))
3884 if targetfilterrevs:
3885 if targetfilterrevs:
3885 msg = b'(%d revisions still filtered)\n'
3886 msg = b'(%d revisions still filtered)\n'
3886 ui.status(msg % len(targetfilterrevs))
3887 ui.status(msg % len(targetfilterrevs))
3887
3888
3888 try:
3889 try:
3889 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3890 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3890 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3891 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3891
3892
3892 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3893 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3893 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3894 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3894
3895
3895 # try to find an existing branchmap to reuse
3896 # try to find an existing branchmap to reuse
3896 subsettable = getbranchmapsubsettable()
3897 subsettable = getbranchmapsubsettable()
3897 candidatefilter = subsettable.get(None)
3898 candidatefilter = subsettable.get(None)
3898 while candidatefilter is not None:
3899 while candidatefilter is not None:
3899 candidatebm = repo.filtered(candidatefilter).branchmap()
3900 candidatebm = repo.filtered(candidatefilter).branchmap()
3900 if candidatebm.validfor(baserepo):
3901 if candidatebm.validfor(baserepo):
3901 filtered = repoview.filterrevs(repo, candidatefilter)
3902 filtered = repoview.filterrevs(repo, candidatefilter)
3902 missing = [r for r in allbaserevs if r in filtered]
3903 missing = [r for r in allbaserevs if r in filtered]
3903 base = candidatebm.copy()
3904 base = candidatebm.copy()
3904 base.update(baserepo, missing)
3905 base.update(baserepo, missing)
3905 break
3906 break
3906 candidatefilter = subsettable.get(candidatefilter)
3907 candidatefilter = subsettable.get(candidatefilter)
3907 else:
3908 else:
3908 # no suitable subset was found
3909 # no suitable subset was found
3909 base = branchmap.branchcache()
3910 base = branchmap.branchcache()
3910 base.update(baserepo, allbaserevs)
3911 base.update(baserepo, allbaserevs)
3911
3912
3912 def setup():
3913 def setup():
3913 x[0] = base.copy()
3914 x[0] = base.copy()
3914 if clearcaches:
3915 if clearcaches:
3915 unfi._revbranchcache = None
3916 unfi._revbranchcache = None
3916 clearchangelog(repo)
3917 clearchangelog(repo)
3917
3918
3918 def bench():
3919 def bench():
3919 x[0].update(targetrepo, newrevs)
3920 x[0].update(targetrepo, newrevs)
3920
3921
3921 timer(bench, setup=setup)
3922 timer(bench, setup=setup)
3922 fm.end()
3923 fm.end()
3923 finally:
3924 finally:
3924 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3925 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3925 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3926 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3926
3927
3927
3928
3928 @command(
3929 @command(
3929 b'perf::branchmapload|perfbranchmapload',
3930 b'perf::branchmapload|perfbranchmapload',
3930 [
3931 [
3931 (b'f', b'filter', b'', b'Specify repoview filter'),
3932 (b'f', b'filter', b'', b'Specify repoview filter'),
3932 (b'', b'list', False, b'List branchmap filter caches'),
3933 (b'', b'list', False, b'List branchmap filter caches'),
3933 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3934 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3934 ]
3935 ]
3935 + formatteropts,
3936 + formatteropts,
3936 )
3937 )
3937 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3938 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3938 """benchmark reading the branchmap"""
3939 """benchmark reading the branchmap"""
3939 opts = _byteskwargs(opts)
3940 opts = _byteskwargs(opts)
3940 clearrevlogs = opts[b'clear_revlogs']
3941 clearrevlogs = opts[b'clear_revlogs']
3941
3942
3942 if list:
3943 if list:
3943 for name, kind, st in repo.cachevfs.readdir(stat=True):
3944 for name, kind, st in repo.cachevfs.readdir(stat=True):
3944 if name.startswith(b'branch2'):
3945 if name.startswith(b'branch2'):
3945 filtername = name.partition(b'-')[2] or b'unfiltered'
3946 filtername = name.partition(b'-')[2] or b'unfiltered'
3946 ui.status(
3947 ui.status(
3947 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3948 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3948 )
3949 )
3949 return
3950 return
3950 if not filter:
3951 if not filter:
3951 filter = None
3952 filter = None
3952 subsettable = getbranchmapsubsettable()
3953 subsettable = getbranchmapsubsettable()
3953 if filter is None:
3954 if filter is None:
3954 repo = repo.unfiltered()
3955 repo = repo.unfiltered()
3955 else:
3956 else:
3956 repo = repoview.repoview(repo, filter)
3957 repo = repoview.repoview(repo, filter)
3957
3958
3958 repo.branchmap() # make sure we have a relevant, up to date branchmap
3959 repo.branchmap() # make sure we have a relevant, up to date branchmap
3959
3960
3960 try:
3961 try:
3961 fromfile = branchmap.branchcache.fromfile
3962 fromfile = branchmap.branchcache.fromfile
3962 except AttributeError:
3963 except AttributeError:
3963 # older versions
3964 # older versions
3964 fromfile = branchmap.read
3965 fromfile = branchmap.read
3965
3966
3966 currentfilter = filter
3967 currentfilter = filter
3967 # try once without timer, the filter may not be cached
3968 # try once without timer, the filter may not be cached
3968 while fromfile(repo) is None:
3969 while fromfile(repo) is None:
3969 currentfilter = subsettable.get(currentfilter)
3970 currentfilter = subsettable.get(currentfilter)
3970 if currentfilter is None:
3971 if currentfilter is None:
3971 raise error.Abort(
3972 raise error.Abort(
3972 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3973 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3973 )
3974 )
3974 repo = repo.filtered(currentfilter)
3975 repo = repo.filtered(currentfilter)
3975 timer, fm = gettimer(ui, opts)
3976 timer, fm = gettimer(ui, opts)
3976
3977
3977 def setup():
3978 def setup():
3978 if clearrevlogs:
3979 if clearrevlogs:
3979 clearchangelog(repo)
3980 clearchangelog(repo)
3980
3981
3981 def bench():
3982 def bench():
3982 fromfile(repo)
3983 fromfile(repo)
3983
3984
3984 timer(bench, setup=setup)
3985 timer(bench, setup=setup)
3985 fm.end()
3986 fm.end()
3986
3987
3987
3988
3988 @command(b'perf::loadmarkers|perfloadmarkers')
3989 @command(b'perf::loadmarkers|perfloadmarkers')
3989 def perfloadmarkers(ui, repo):
3990 def perfloadmarkers(ui, repo):
3990 """benchmark the time to parse the on-disk markers for a repo
3991 """benchmark the time to parse the on-disk markers for a repo
3991
3992
3992 Result is the number of markers in the repo."""
3993 Result is the number of markers in the repo."""
3993 timer, fm = gettimer(ui)
3994 timer, fm = gettimer(ui)
3994 svfs = getsvfs(repo)
3995 svfs = getsvfs(repo)
3995 timer(lambda: len(obsolete.obsstore(repo, svfs)))
3996 timer(lambda: len(obsolete.obsstore(repo, svfs)))
3996 fm.end()
3997 fm.end()
3997
3998
3998
3999
3999 @command(
4000 @command(
4000 b'perf::lrucachedict|perflrucachedict',
4001 b'perf::lrucachedict|perflrucachedict',
4001 formatteropts
4002 formatteropts
4002 + [
4003 + [
4003 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4004 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4004 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4005 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4005 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4006 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4006 (b'', b'size', 4, b'size of cache'),
4007 (b'', b'size', 4, b'size of cache'),
4007 (b'', b'gets', 10000, b'number of key lookups'),
4008 (b'', b'gets', 10000, b'number of key lookups'),
4008 (b'', b'sets', 10000, b'number of key sets'),
4009 (b'', b'sets', 10000, b'number of key sets'),
4009 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4010 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4010 (
4011 (
4011 b'',
4012 b'',
4012 b'mixedgetfreq',
4013 b'mixedgetfreq',
4013 50,
4014 50,
4014 b'frequency of get vs set ops in mixed mode',
4015 b'frequency of get vs set ops in mixed mode',
4015 ),
4016 ),
4016 ],
4017 ],
4017 norepo=True,
4018 norepo=True,
4018 )
4019 )
4019 def perflrucache(
4020 def perflrucache(
4020 ui,
4021 ui,
4021 mincost=0,
4022 mincost=0,
4022 maxcost=100,
4023 maxcost=100,
4023 costlimit=0,
4024 costlimit=0,
4024 size=4,
4025 size=4,
4025 gets=10000,
4026 gets=10000,
4026 sets=10000,
4027 sets=10000,
4027 mixed=10000,
4028 mixed=10000,
4028 mixedgetfreq=50,
4029 mixedgetfreq=50,
4029 **opts
4030 **opts
4030 ):
4031 ):
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

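# Illustrative invocations for perf::lrucachedict (examples added for clarity;
# flag names come from the command table above, and the command is norepo, so
# no repository is required):
#
#   $ hg perf::lrucachedict --size 4 --gets 10000 --sets 10000
#   $ hg perf::lrucachedict --costlimit 500 --mincost 1 --maxcost 100
#
# When --costlimit is non-zero, the cost-aware benchmarks ("gets/inserts/mixed
# w/ cost limit") run instead of the plain gets/inserts/sets/mixed ones.

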
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        line = item * nitems + b'\n'

    def benchmark():
        for i in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for i in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()

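# Illustrative invocations for perf::write (examples added for clarity; flag
# names come from the command table above):
#
#   $ hg perf::write --nlines 1000 --nitems 50 --batch-line
#   $ hg perf::write --flush-line
#
# With --batch-line the whole line is built once and handed to the selected ui
# write method in a single call; otherwise one call is made per item.

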
def uisetup(ui):
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)

@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
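

# Illustrative invocation for perf::progress (example added for clarity; the
# command is norepo, so it can run outside a repository):
#
#   $ hg perf::progress --topic loading --total 100000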