##// END OF EJS Templates
path: update logic in `perf` to use the push variant when available...
marmoute -
r50600:ec8140c4 default
parent child Browse files
Show More
@@ -1,4230 +1,4234 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
When set, additional statistics will be reported for each benchmark: best,
worst, median, and average. If not set, only the best timing is reported
(default: off).
14
14
15 ``presleep``
15 ``presleep``
number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
# Open a revlog portably across Mercurial versions: newer revlog
# constructors require an explicit "kind" tuple right after the opener,
# older ones do not.
try:
    from mercurial.revlogutils import constants as revlog_constants

    # tag revlogs created by this extension so they are identifiable
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        # modern signature: kind tuple is the second positional argument
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        # historical signature: no kind argument
        return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged (no-op stand-in for missing pycompat helpers)."""
    return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
def safehasattr(thing, attr):
    """Portable hasattr(): True iff *thing* has attribute *attr* (bytes name)."""
    probe = getattr(thing, _sysstr(attr), _undefined)
    return probe is not _undefined
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238 # for "historical portability":
238 # for "historical portability":
239 # define parsealiases locally, because cmdutil.parsealiases has been
239 # define parsealiases locally, because cmdutil.parsealiases has been
240 # available since 1.5 (or 6252852b4332)
240 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command declaration b'name|alias1|...' into its name list."""
    aliases = cmd.split(b"|")
    return aliases
243
243
244
244
# Pick a working "@command" decorator factory depending on what this
# Mercurial version provides.
if safehasattr(registrar, 'command'):
    # modern path: registrar.command (since 3.7)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending the command's aliases to
            # commands.norepo before delegating to the wrapped factory
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
276
276
277
277
# Register this extension's config items so Mercurial's config
# validation machinery knows about them; silently skip on versions that
# predate the registrar/configitems modules.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (configitem() did not accept the `experimental` keyword yet, so
    # re-register everything without it)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
364
364
365
365
def getlen(ui):
    """Return a length function honoring the (experimental) perf.stub knob.

    In stub mode every collection is reported as having one element so
    benchmarks finish quickly; otherwise the builtin ``len`` is returned.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda seq: 1) if stubbed else len
370
370
371
371
class noop:
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        # returning a falsy value lets exceptions propagate normally
        return None


# shared do-nothing context, used when profiling is disabled
NOOPCTX = noop()
383
383
384
384
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # formatter is "falsy": callers use bool(fm) to detect a
                # real (templated) formatter
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<runcount>"; malformed entries are skipped
    # with a warning
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        # experimental config: perf.profile-benchmark
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
507
507
508
508
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after an optional *setup* call).

    Used in perf.stub mode instead of _timer; *fm* and *title* are
    accepted only for signature compatibility and are ignored.
    """
    if setup is not None:
        setup()
    func()
513
513
514
514
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) timing sample.

    The sample covers the code executed inside the ``with`` block and is
    appended to the yielded list after the block exits.
    """
    sample = []
    os_before = os.times()
    wall_before = util.timer()
    yield sample
    wall_after = util.timer()
    os_after = os.times()
    sample.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
525
525
526
526
# list of stop condition (elapsed time, minimal run count)
# A benchmark stops at the first pair where it has run for at least
# <elapsed time> seconds AND completed at least <minimal run count>
# iterations (see _timer below).
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
532
532
533
533
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func*, collect timings and report them through *fm*.

    *setup* (if given) runs before every iteration, outside the timed
    section.  *prerun* untimed warm-up iterations are performed first.
    *profiler* (a context manager) wraps only the first measured
    iteration.  Iteration stops at the first (elapsed, mincount) pair in
    *limits* satisfied by both total elapsed time and iteration count.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs: neither timed nor recorded
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first iteration is profiled (see module docstring)
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # report the last call's return value alongside the timings
    formatone(fm, results, title=title, result=r, displayall=displayall)
573
573
574
574
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark's timing statistics through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place.  Only the best sample is shown unless *displayall* is true,
    in which case max, average and median rows are appended.
    """
    count = len(timings)

    fm.startitem()
    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, sample):
        # the "best" row carries bare field names; other roles prefix them
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', sample[0])
        fm.write(prefix + b'comb', b' comb %f', sample[1] + sample[2])
        fm.write(prefix + b'user', b' user %f', sample[1])
        fm.write(prefix + b'sys', b' sys %f', sample[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        _show(b'median', timings[len(timings) // 2])
608
608
609
609
610 # utilities for historical portability
610 # utilities for historical portability
611
611
612
612
def getint(ui, section, name, default):
    """Read config `section.name` as an integer, or `default` if unset.

    Raises ConfigError when the configured value is not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        msg = b"%s.%s is not an integer ('%s')" % (section, name, raw)
        raise error.ConfigError(msg)
625
625
626
626
def safeattrsetter(obj, name, ignoremissing=False):
    """Return a set/restore helper for attribute 'name' of 'obj'.

    Aborts when 'obj' lacks the attribute at runtime, so a future
    removal of the attribute (which would invalidate the measurement)
    is noticed rather than silently ignored.

    The returned object offers set(newvalue) to assign a new value and
    restore() to put the original value back.

    With 'ignoremissing' set, a missing attribute yields None instead
    of aborting, which is useful for probing attributes that only some
    Mercurial versions have.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663
663
664
664
665 # utilities to examine each internal API changes
665 # utilities to examine each internal API changes
666
666
667
667
def getbranchmapsubsettable():
    """Locate the branch-cache `subsettable` mapping across hg versions."""
    # for "historical portability": subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686
686
687
687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the same thing as 'sopener'
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
697
697
698
698
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the same thing as 'opener'
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
708
708
709
709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # older attribute spellings, newest first:
    # - '_tags' since 1.4 (or 5614a628d173)
    # - 'tagscache' since 0.6 (or d7df759d0e97)
    for attrname in (b'_tags', b'tagscache'):
        setter = safeattrsetter(repo, attrname, ignoremissing=True)
        if setter:
            return lambda: setter.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738
738
739
739
740 # utilities to clear cache
740 # utilities to clear cache
741
741
742
742
def clearfilecache(obj, attrname):
    """Drop `attrname` from `obj`'s filecache-backed properties.

    Operates on the unfiltered object when `obj` supports unfiltering,
    since that is where the cached values live.  A missing attribute or
    cache entry is tolerated.
    """
    unfilter = getattr(obj, 'unfiltered', None)
    target = obj if unfilter is None else obj.unfiltered()
    if attrname in vars(target):
        delattr(target, attrname)
    target._filecache.pop(attrname, None)
750
750
751
751
def clearchangelog(repo):
    """Force the changelog to be re-read from disk on next access."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # a filtered repo keeps its own cached changelog and cache key;
        # use object.__setattr__ to bypass the repo's attribute magic
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
757
757
758
758
759 # perf commands
759 # perf commands
760
760
761
761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark a dirstate walk over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def run():
        walked = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walked))

    timer(run)
    fm.end()
775
775
776
776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file `f` at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def run():
        return len(fctx.annotate(True))

    timer(run)
    fm.end()
784
784
785
785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']
    if opts[b'dirstate']:
        # exercise the low-level dirstate API directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so lazy work is actually measured
            sum(map(bool, s))

        timer(status_dirstate)
    else:

        def status_full():
            return sum(map(len, repo.status(unknown=unknown)))

        timer(status_full)
    fm.end()
822
822
823
823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Save/restore quietness OUTSIDE the try block: in the original
    # code the save itself lived inside `try`, so any failure there
    # would have made the `finally` clause hit an unbound local.
    oldquiet = repo.ui.quiet
    repo.ui.quiet = True
    try:
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # for "historical portability": the uipathfn argument was
        # added to scmutil.addremove later; detect it via getargspec
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841
841
842
842
def clearcaches(cl):
    """Drop a revlog's lookup caches, across internal API generations."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
        return
    if util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2: reset the node cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853
853
854
854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start every run from a cold cache
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
870
870
871
871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark reading the tags of a repository"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            # also re-read the revlogs tags computation depends on
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
896
896
897
897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
910
910
911
911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        # a fresh lazy ancestor set each run, probed once per revision
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(run)
    fm.end()
926
926
927
927
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # with a single positional the revision is implied; with two, the
    # first names the file whose revlog should be opened
    if arg_2 is None:
        file_, rev = None, arg_1
    else:
        file_, rev = arg_1, arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    full_text = revlog.revision(rev)

    # rebuild the metadata a freshly-added revision would carry;
    # cachedelta is None so the delta search starts from scratch
    revinfo = revlogutils.revisioninfo(
        node,
        revlog.node(p1r),
        revlog.node(p2r),
        [full_text],  # btext
        len(full_text),
        None,  # cachedelta
        revlog.flags(rev),
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
991
991
992
992
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        # modern API (post urlutil introduction)
        from mercurial.utils.urlutil import get_unique_pull_path

        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        # for "historical portability": fall back to the older helper
        path = ui.expandpath(path)

    def setup_peer():
        # a fresh peer per run, so connection state is not reused
        repos[1] = hg.peer(ui, opts, path)

    def run():
        setdiscovery.findcommonheads(ui, *repos)

    timer(run, setup=setup_peer)
    fm.end()
1014
1014
1015
1015
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses it
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1040
1040
1041
1041
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # for "historical portability": parsebundlespec moved from
    # exchange to bundlecaches
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed wording: was the ungrammatical "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        # infer the changegroup version from the bundle format version
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: only bundle *generation* is measured
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1145
1145
1146
1146
1147 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1147 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1148 def perfbundleread(ui, repo, bundlepath, **opts):
1148 def perfbundleread(ui, repo, bundlepath, **opts):
1149 """Benchmark reading of bundle files.
1149 """Benchmark reading of bundle files.
1150
1150
1151 This command is meant to isolate the I/O part of bundle reading as
1151 This command is meant to isolate the I/O part of bundle reading as
1152 much as possible.
1152 much as possible.
1153 """
1153 """
1154 from mercurial import (
1154 from mercurial import (
1155 bundle2,
1155 bundle2,
1156 exchange,
1156 exchange,
1157 streamclone,
1157 streamclone,
1158 )
1158 )
1159
1159
1160 opts = _byteskwargs(opts)
1160 opts = _byteskwargs(opts)
1161
1161
1162 def makebench(fn):
1162 def makebench(fn):
1163 def run():
1163 def run():
1164 with open(bundlepath, b'rb') as fh:
1164 with open(bundlepath, b'rb') as fh:
1165 bundle = exchange.readbundle(ui, fh, bundlepath)
1165 bundle = exchange.readbundle(ui, fh, bundlepath)
1166 fn(bundle)
1166 fn(bundle)
1167
1167
1168 return run
1168 return run
1169
1169
1170 def makereadnbytes(size):
1170 def makereadnbytes(size):
1171 def run():
1171 def run():
1172 with open(bundlepath, b'rb') as fh:
1172 with open(bundlepath, b'rb') as fh:
1173 bundle = exchange.readbundle(ui, fh, bundlepath)
1173 bundle = exchange.readbundle(ui, fh, bundlepath)
1174 while bundle.read(size):
1174 while bundle.read(size):
1175 pass
1175 pass
1176
1176
1177 return run
1177 return run
1178
1178
1179 def makestdioread(size):
1179 def makestdioread(size):
1180 def run():
1180 def run():
1181 with open(bundlepath, b'rb') as fh:
1181 with open(bundlepath, b'rb') as fh:
1182 while fh.read(size):
1182 while fh.read(size):
1183 pass
1183 pass
1184
1184
1185 return run
1185 return run
1186
1186
1187 # bundle1
1187 # bundle1
1188
1188
1189 def deltaiter(bundle):
1189 def deltaiter(bundle):
1190 for delta in bundle.deltaiter():
1190 for delta in bundle.deltaiter():
1191 pass
1191 pass
1192
1192
1193 def iterchunks(bundle):
1193 def iterchunks(bundle):
1194 for chunk in bundle.getchunks():
1194 for chunk in bundle.getchunks():
1195 pass
1195 pass
1196
1196
1197 # bundle2
1197 # bundle2
1198
1198
1199 def forwardchunks(bundle):
1199 def forwardchunks(bundle):
1200 for chunk in bundle._forwardchunks():
1200 for chunk in bundle._forwardchunks():
1201 pass
1201 pass
1202
1202
1203 def iterparts(bundle):
1203 def iterparts(bundle):
1204 for part in bundle.iterparts():
1204 for part in bundle.iterparts():
1205 pass
1205 pass
1206
1206
1207 def iterpartsseekable(bundle):
1207 def iterpartsseekable(bundle):
1208 for part in bundle.iterparts(seekable=True):
1208 for part in bundle.iterparts(seekable=True):
1209 pass
1209 pass
1210
1210
1211 def seek(bundle):
1211 def seek(bundle):
1212 for part in bundle.iterparts(seekable=True):
1212 for part in bundle.iterparts(seekable=True):
1213 part.seek(0, os.SEEK_END)
1213 part.seek(0, os.SEEK_END)
1214
1214
1215 def makepartreadnbytes(size):
1215 def makepartreadnbytes(size):
1216 def run():
1216 def run():
1217 with open(bundlepath, b'rb') as fh:
1217 with open(bundlepath, b'rb') as fh:
1218 bundle = exchange.readbundle(ui, fh, bundlepath)
1218 bundle = exchange.readbundle(ui, fh, bundlepath)
1219 for part in bundle.iterparts():
1219 for part in bundle.iterparts():
1220 while part.read(size):
1220 while part.read(size):
1221 pass
1221 pass
1222
1222
1223 return run
1223 return run
1224
1224
1225 benches = [
1225 benches = [
1226 (makestdioread(8192), b'read(8k)'),
1226 (makestdioread(8192), b'read(8k)'),
1227 (makestdioread(16384), b'read(16k)'),
1227 (makestdioread(16384), b'read(16k)'),
1228 (makestdioread(32768), b'read(32k)'),
1228 (makestdioread(32768), b'read(32k)'),
1229 (makestdioread(131072), b'read(128k)'),
1229 (makestdioread(131072), b'read(128k)'),
1230 ]
1230 ]
1231
1231
1232 with open(bundlepath, b'rb') as fh:
1232 with open(bundlepath, b'rb') as fh:
1233 bundle = exchange.readbundle(ui, fh, bundlepath)
1233 bundle = exchange.readbundle(ui, fh, bundlepath)
1234
1234
1235 if isinstance(bundle, changegroup.cg1unpacker):
1235 if isinstance(bundle, changegroup.cg1unpacker):
1236 benches.extend(
1236 benches.extend(
1237 [
1237 [
1238 (makebench(deltaiter), b'cg1 deltaiter()'),
1238 (makebench(deltaiter), b'cg1 deltaiter()'),
1239 (makebench(iterchunks), b'cg1 getchunks()'),
1239 (makebench(iterchunks), b'cg1 getchunks()'),
1240 (makereadnbytes(8192), b'cg1 read(8k)'),
1240 (makereadnbytes(8192), b'cg1 read(8k)'),
1241 (makereadnbytes(16384), b'cg1 read(16k)'),
1241 (makereadnbytes(16384), b'cg1 read(16k)'),
1242 (makereadnbytes(32768), b'cg1 read(32k)'),
1242 (makereadnbytes(32768), b'cg1 read(32k)'),
1243 (makereadnbytes(131072), b'cg1 read(128k)'),
1243 (makereadnbytes(131072), b'cg1 read(128k)'),
1244 ]
1244 ]
1245 )
1245 )
1246 elif isinstance(bundle, bundle2.unbundle20):
1246 elif isinstance(bundle, bundle2.unbundle20):
1247 benches.extend(
1247 benches.extend(
1248 [
1248 [
1249 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1249 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1250 (makebench(iterparts), b'bundle2 iterparts()'),
1250 (makebench(iterparts), b'bundle2 iterparts()'),
1251 (
1251 (
1252 makebench(iterpartsseekable),
1252 makebench(iterpartsseekable),
1253 b'bundle2 iterparts() seekable',
1253 b'bundle2 iterparts() seekable',
1254 ),
1254 ),
1255 (makebench(seek), b'bundle2 part seek()'),
1255 (makebench(seek), b'bundle2 part seek()'),
1256 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1256 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1257 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1257 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1258 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1258 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1259 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1259 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1260 ]
1260 ]
1261 )
1261 )
1262 elif isinstance(bundle, streamclone.streamcloneapplier):
1262 elif isinstance(bundle, streamclone.streamcloneapplier):
1263 raise error.Abort(b'stream clone bundles not supported')
1263 raise error.Abort(b'stream clone bundles not supported')
1264 else:
1264 else:
1265 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1265 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1266
1266
1267 for fn, title in benches:
1267 for fn, title in benches:
1268 timer, fm = gettimer(ui, opts)
1268 timer, fm = gettimer(ui, opts)
1269 timer(fn, title=title)
1269 timer(fn, title=title)
1270 fm.end()
1270 fm.end()
1271
1271
1272
1272
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # generate the chunks and drain them; we only care about the cost
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1308
1308
1309
1309
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so its loading is not part of the measurement
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the directory cache so the next run recomputes it from scratch
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()
1326
1326
1327
1327
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before any timing happens
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1390
1390
1391
1391
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate outside of the timed section
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the directory cache so each run starts cold
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1410
1410
1411
1411
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the cache once outside of the timed section
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the filefoldmap so each run rebuilds it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1431
1431
1432
1432
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the cache once outside of the timed section
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the dirfoldmap and the underlying `_dirs` cache so each
        # run rebuilds everything
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1456
1456
1457
1457
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate
    b"a" in ds

    def setup():
        # force the dirty flag so `write` actually writes something
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1474
1474
1475
1475
def _getmergerevs(repo, opts):
    """parse command arguments to return revisions involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1497
1497
1498
1498
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1530
1530
1531
1531
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1554
1554
1555
1555
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside of the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1569
1569
1570
1570
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # also drop the on-disk-backed filecache entry so file reading
            # is included in the measurement
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1595
1595
1596
1596
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # use the push variant of the path when the (newer) path API provides
    # one; fall back to the legacy `pushloc or loc` logic otherwise
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1655
1659
1656
1660
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # argument is a changeset revision: resolve its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1700
1704
1701
1705
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve the revision once, outside of the timed section
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1714
1718
1715
1719
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop any cached ignore matcher so each run loads it from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1732
1736
1733
1737
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1796
1800
1797
1801
1798 @command(
1802 @command(
1799 b'perf::nodemap|perfnodemap',
1803 b'perf::nodemap|perfnodemap',
1800 [
1804 [
1801 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1805 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1802 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1806 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1803 ]
1807 ]
1804 + formatteropts,
1808 + formatteropts,
1805 )
1809 )
1806 def perfnodemap(ui, repo, **opts):
1810 def perfnodemap(ui, repo, **opts):
1807 """benchmark the time necessary to look up revision from a cold nodemap
1811 """benchmark the time necessary to look up revision from a cold nodemap
1808
1812
1809 Depending on the implementation, the amount and order of revision we look
1813 Depending on the implementation, the amount and order of revision we look
1810 up can varies. Example of useful set to test:
1814 up can varies. Example of useful set to test:
1811 * tip
1815 * tip
1812 * 0
1816 * 0
1813 * -10:
1817 * -10:
1814 * :10
1818 * :10
1815 * -10: + :10
1819 * -10: + :10
1816 * :10: + -10:
1820 * :10: + -10:
1817 * -10000:
1821 * -10000:
1818 * -10000: + 0
1822 * -10000: + 0
1819
1823
1820 The command currently focus on valid binary lookup. Benchmarking for
1824 The command currently focus on valid binary lookup. Benchmarking for
1821 hexlookup, prefix lookup and missing lookup would also be valuable.
1825 hexlookup, prefix lookup and missing lookup would also be valuable.
1822 """
1826 """
1823 import mercurial.revlog
1827 import mercurial.revlog
1824
1828
1825 opts = _byteskwargs(opts)
1829 opts = _byteskwargs(opts)
1826 timer, fm = gettimer(ui, opts)
1830 timer, fm = gettimer(ui, opts)
1827 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1831 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1828
1832
1829 unfi = repo.unfiltered()
1833 unfi = repo.unfiltered()
1830 clearcaches = opts[b'clear_caches']
1834 clearcaches = opts[b'clear_caches']
1831 # find the filecache func directly
1835 # find the filecache func directly
1832 # This avoid polluting the benchmark with the filecache logic
1836 # This avoid polluting the benchmark with the filecache logic
1833 makecl = unfi.__class__.changelog.func
1837 makecl = unfi.__class__.changelog.func
1834 if not opts[b'rev']:
1838 if not opts[b'rev']:
1835 raise error.Abort(b'use --rev to specify revisions to look up')
1839 raise error.Abort(b'use --rev to specify revisions to look up')
1836 revs = scmutil.revrange(repo, opts[b'rev'])
1840 revs = scmutil.revrange(repo, opts[b'rev'])
1837 cl = repo.changelog
1841 cl = repo.changelog
1838 nodes = [cl.node(r) for r in revs]
1842 nodes = [cl.node(r) for r in revs]
1839
1843
1840 # use a list to pass reference to a nodemap from one closure to the next
1844 # use a list to pass reference to a nodemap from one closure to the next
1841 nodeget = [None]
1845 nodeget = [None]
1842
1846
1843 def setnodeget():
1847 def setnodeget():
1844 # probably not necessary, but for good measure
1848 # probably not necessary, but for good measure
1845 clearchangelog(unfi)
1849 clearchangelog(unfi)
1846 cl = makecl(unfi)
1850 cl = makecl(unfi)
1847 if util.safehasattr(cl.index, 'get_rev'):
1851 if util.safehasattr(cl.index, 'get_rev'):
1848 nodeget[0] = cl.index.get_rev
1852 nodeget[0] = cl.index.get_rev
1849 else:
1853 else:
1850 nodeget[0] = cl.nodemap.get
1854 nodeget[0] = cl.nodemap.get
1851
1855
1852 def d():
1856 def d():
1853 get = nodeget[0]
1857 get = nodeget[0]
1854 for n in nodes:
1858 for n in nodes:
1855 get(n)
1859 get(n)
1856
1860
1857 setup = None
1861 setup = None
1858 if clearcaches:
1862 if clearcaches:
1859
1863
1860 def setup():
1864 def setup():
1861 setnodeget()
1865 setnodeget()
1862
1866
1863 else:
1867 else:
1864 setnodeget()
1868 setnodeget()
1865 d() # prewarm the data structure
1869 d() # prewarm the data structure
1866 timer(d, setup=setup)
1870 timer(d, setup=setup)
1867 fm.end()
1871 fm.end()
1868
1872
1869
1873
1870 @command(b'perf::startup|perfstartup', formatteropts)
1874 @command(b'perf::startup|perfstartup', formatteropts)
1871 def perfstartup(ui, repo, **opts):
1875 def perfstartup(ui, repo, **opts):
1872 opts = _byteskwargs(opts)
1876 opts = _byteskwargs(opts)
1873 timer, fm = gettimer(ui, opts)
1877 timer, fm = gettimer(ui, opts)
1874
1878
1875 def d():
1879 def d():
1876 if os.name != 'nt':
1880 if os.name != 'nt':
1877 os.system(
1881 os.system(
1878 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1882 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1879 )
1883 )
1880 else:
1884 else:
1881 os.environ['HGRCPATH'] = r' '
1885 os.environ['HGRCPATH'] = r' '
1882 os.system("%s version -q > NUL" % sys.argv[0])
1886 os.system("%s version -q > NUL" % sys.argv[0])
1883
1887
1884 timer(d)
1888 timer(d)
1885 fm.end()
1889 fm.end()
1886
1890
1887
1891
1888 @command(b'perf::parents|perfparents', formatteropts)
1892 @command(b'perf::parents|perfparents', formatteropts)
1889 def perfparents(ui, repo, **opts):
1893 def perfparents(ui, repo, **opts):
1890 """benchmark the time necessary to fetch one changeset's parents.
1894 """benchmark the time necessary to fetch one changeset's parents.
1891
1895
1892 The fetch is done using the `node identifier`, traversing all object layers
1896 The fetch is done using the `node identifier`, traversing all object layers
1893 from the repository object. The first N revisions will be used for this
1897 from the repository object. The first N revisions will be used for this
1894 benchmark. N is controlled by the ``perf.parentscount`` config option
1898 benchmark. N is controlled by the ``perf.parentscount`` config option
1895 (default: 1000).
1899 (default: 1000).
1896 """
1900 """
1897 opts = _byteskwargs(opts)
1901 opts = _byteskwargs(opts)
1898 timer, fm = gettimer(ui, opts)
1902 timer, fm = gettimer(ui, opts)
1899 # control the number of commits perfparents iterates over
1903 # control the number of commits perfparents iterates over
1900 # experimental config: perf.parentscount
1904 # experimental config: perf.parentscount
1901 count = getint(ui, b"perf", b"parentscount", 1000)
1905 count = getint(ui, b"perf", b"parentscount", 1000)
1902 if len(repo.changelog) < count:
1906 if len(repo.changelog) < count:
1903 raise error.Abort(b"repo needs %d commits for this test" % count)
1907 raise error.Abort(b"repo needs %d commits for this test" % count)
1904 repo = repo.unfiltered()
1908 repo = repo.unfiltered()
1905 nl = [repo.changelog.node(i) for i in _xrange(count)]
1909 nl = [repo.changelog.node(i) for i in _xrange(count)]
1906
1910
1907 def d():
1911 def d():
1908 for n in nl:
1912 for n in nl:
1909 repo.changelog.parents(n)
1913 repo.changelog.parents(n)
1910
1914
1911 timer(d)
1915 timer(d)
1912 fm.end()
1916 fm.end()
1913
1917
1914
1918
1915 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1919 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1916 def perfctxfiles(ui, repo, x, **opts):
1920 def perfctxfiles(ui, repo, x, **opts):
1917 opts = _byteskwargs(opts)
1921 opts = _byteskwargs(opts)
1918 x = int(x)
1922 x = int(x)
1919 timer, fm = gettimer(ui, opts)
1923 timer, fm = gettimer(ui, opts)
1920
1924
1921 def d():
1925 def d():
1922 len(repo[x].files())
1926 len(repo[x].files())
1923
1927
1924 timer(d)
1928 timer(d)
1925 fm.end()
1929 fm.end()
1926
1930
1927
1931
1928 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1932 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1929 def perfrawfiles(ui, repo, x, **opts):
1933 def perfrawfiles(ui, repo, x, **opts):
1930 opts = _byteskwargs(opts)
1934 opts = _byteskwargs(opts)
1931 x = int(x)
1935 x = int(x)
1932 timer, fm = gettimer(ui, opts)
1936 timer, fm = gettimer(ui, opts)
1933 cl = repo.changelog
1937 cl = repo.changelog
1934
1938
1935 def d():
1939 def d():
1936 len(cl.read(x)[3])
1940 len(cl.read(x)[3])
1937
1941
1938 timer(d)
1942 timer(d)
1939 fm.end()
1943 fm.end()
1940
1944
1941
1945
1942 @command(b'perf::lookup|perflookup', formatteropts)
1946 @command(b'perf::lookup|perflookup', formatteropts)
1943 def perflookup(ui, repo, rev, **opts):
1947 def perflookup(ui, repo, rev, **opts):
1944 opts = _byteskwargs(opts)
1948 opts = _byteskwargs(opts)
1945 timer, fm = gettimer(ui, opts)
1949 timer, fm = gettimer(ui, opts)
1946 timer(lambda: len(repo.lookup(rev)))
1950 timer(lambda: len(repo.lookup(rev)))
1947 fm.end()
1951 fm.end()
1948
1952
1949
1953
1950 @command(
1954 @command(
1951 b'perf::linelogedits|perflinelogedits',
1955 b'perf::linelogedits|perflinelogedits',
1952 [
1956 [
1953 (b'n', b'edits', 10000, b'number of edits'),
1957 (b'n', b'edits', 10000, b'number of edits'),
1954 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1958 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1955 ],
1959 ],
1956 norepo=True,
1960 norepo=True,
1957 )
1961 )
1958 def perflinelogedits(ui, **opts):
1962 def perflinelogedits(ui, **opts):
1959 from mercurial import linelog
1963 from mercurial import linelog
1960
1964
1961 opts = _byteskwargs(opts)
1965 opts = _byteskwargs(opts)
1962
1966
1963 edits = opts[b'edits']
1967 edits = opts[b'edits']
1964 maxhunklines = opts[b'max_hunk_lines']
1968 maxhunklines = opts[b'max_hunk_lines']
1965
1969
1966 maxb1 = 100000
1970 maxb1 = 100000
1967 random.seed(0)
1971 random.seed(0)
1968 randint = random.randint
1972 randint = random.randint
1969 currentlines = 0
1973 currentlines = 0
1970 arglist = []
1974 arglist = []
1971 for rev in _xrange(edits):
1975 for rev in _xrange(edits):
1972 a1 = randint(0, currentlines)
1976 a1 = randint(0, currentlines)
1973 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1977 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1974 b1 = randint(0, maxb1)
1978 b1 = randint(0, maxb1)
1975 b2 = randint(b1, b1 + maxhunklines)
1979 b2 = randint(b1, b1 + maxhunklines)
1976 currentlines += (b2 - b1) - (a2 - a1)
1980 currentlines += (b2 - b1) - (a2 - a1)
1977 arglist.append((rev, a1, a2, b1, b2))
1981 arglist.append((rev, a1, a2, b1, b2))
1978
1982
1979 def d():
1983 def d():
1980 ll = linelog.linelog()
1984 ll = linelog.linelog()
1981 for args in arglist:
1985 for args in arglist:
1982 ll.replacelines(*args)
1986 ll.replacelines(*args)
1983
1987
1984 timer, fm = gettimer(ui, opts)
1988 timer, fm = gettimer(ui, opts)
1985 timer(d)
1989 timer(d)
1986 fm.end()
1990 fm.end()
1987
1991
1988
1992
1989 @command(b'perf::revrange|perfrevrange', formatteropts)
1993 @command(b'perf::revrange|perfrevrange', formatteropts)
1990 def perfrevrange(ui, repo, *specs, **opts):
1994 def perfrevrange(ui, repo, *specs, **opts):
1991 opts = _byteskwargs(opts)
1995 opts = _byteskwargs(opts)
1992 timer, fm = gettimer(ui, opts)
1996 timer, fm = gettimer(ui, opts)
1993 revrange = scmutil.revrange
1997 revrange = scmutil.revrange
1994 timer(lambda: len(revrange(repo, specs)))
1998 timer(lambda: len(revrange(repo, specs)))
1995 fm.end()
1999 fm.end()
1996
2000
1997
2001
1998 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2002 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
1999 def perfnodelookup(ui, repo, rev, **opts):
2003 def perfnodelookup(ui, repo, rev, **opts):
2000 opts = _byteskwargs(opts)
2004 opts = _byteskwargs(opts)
2001 timer, fm = gettimer(ui, opts)
2005 timer, fm = gettimer(ui, opts)
2002 import mercurial.revlog
2006 import mercurial.revlog
2003
2007
2004 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2008 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2005 n = scmutil.revsingle(repo, rev).node()
2009 n = scmutil.revsingle(repo, rev).node()
2006
2010
2007 try:
2011 try:
2008 cl = revlog(getsvfs(repo), radix=b"00changelog")
2012 cl = revlog(getsvfs(repo), radix=b"00changelog")
2009 except TypeError:
2013 except TypeError:
2010 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2014 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2011
2015
2012 def d():
2016 def d():
2013 cl.rev(n)
2017 cl.rev(n)
2014 clearcaches(cl)
2018 clearcaches(cl)
2015
2019
2016 timer(d)
2020 timer(d)
2017 fm.end()
2021 fm.end()
2018
2022
2019
2023
2020 @command(
2024 @command(
2021 b'perf::log|perflog',
2025 b'perf::log|perflog',
2022 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2026 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2023 )
2027 )
2024 def perflog(ui, repo, rev=None, **opts):
2028 def perflog(ui, repo, rev=None, **opts):
2025 opts = _byteskwargs(opts)
2029 opts = _byteskwargs(opts)
2026 if rev is None:
2030 if rev is None:
2027 rev = []
2031 rev = []
2028 timer, fm = gettimer(ui, opts)
2032 timer, fm = gettimer(ui, opts)
2029 ui.pushbuffer()
2033 ui.pushbuffer()
2030 timer(
2034 timer(
2031 lambda: commands.log(
2035 lambda: commands.log(
2032 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2036 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2033 )
2037 )
2034 )
2038 )
2035 ui.popbuffer()
2039 ui.popbuffer()
2036 fm.end()
2040 fm.end()
2037
2041
2038
2042
2039 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2043 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2040 def perfmoonwalk(ui, repo, **opts):
2044 def perfmoonwalk(ui, repo, **opts):
2041 """benchmark walking the changelog backwards
2045 """benchmark walking the changelog backwards
2042
2046
2043 This also loads the changelog data for each revision in the changelog.
2047 This also loads the changelog data for each revision in the changelog.
2044 """
2048 """
2045 opts = _byteskwargs(opts)
2049 opts = _byteskwargs(opts)
2046 timer, fm = gettimer(ui, opts)
2050 timer, fm = gettimer(ui, opts)
2047
2051
2048 def moonwalk():
2052 def moonwalk():
2049 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2053 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2050 ctx = repo[i]
2054 ctx = repo[i]
2051 ctx.branch() # read changelog data (in addition to the index)
2055 ctx.branch() # read changelog data (in addition to the index)
2052
2056
2053 timer(moonwalk)
2057 timer(moonwalk)
2054 fm.end()
2058 fm.end()
2055
2059
2056
2060
2057 @command(
2061 @command(
2058 b'perf::templating|perftemplating',
2062 b'perf::templating|perftemplating',
2059 [
2063 [
2060 (b'r', b'rev', [], b'revisions to run the template on'),
2064 (b'r', b'rev', [], b'revisions to run the template on'),
2061 ]
2065 ]
2062 + formatteropts,
2066 + formatteropts,
2063 )
2067 )
2064 def perftemplating(ui, repo, testedtemplate=None, **opts):
2068 def perftemplating(ui, repo, testedtemplate=None, **opts):
2065 """test the rendering time of a given template"""
2069 """test the rendering time of a given template"""
2066 if makelogtemplater is None:
2070 if makelogtemplater is None:
2067 raise error.Abort(
2071 raise error.Abort(
2068 b"perftemplating not available with this Mercurial",
2072 b"perftemplating not available with this Mercurial",
2069 hint=b"use 4.3 or later",
2073 hint=b"use 4.3 or later",
2070 )
2074 )
2071
2075
2072 opts = _byteskwargs(opts)
2076 opts = _byteskwargs(opts)
2073
2077
2074 nullui = ui.copy()
2078 nullui = ui.copy()
2075 nullui.fout = open(os.devnull, 'wb')
2079 nullui.fout = open(os.devnull, 'wb')
2076 nullui.disablepager()
2080 nullui.disablepager()
2077 revs = opts.get(b'rev')
2081 revs = opts.get(b'rev')
2078 if not revs:
2082 if not revs:
2079 revs = [b'all()']
2083 revs = [b'all()']
2080 revs = list(scmutil.revrange(repo, revs))
2084 revs = list(scmutil.revrange(repo, revs))
2081
2085
2082 defaulttemplate = (
2086 defaulttemplate = (
2083 b'{date|shortdate} [{rev}:{node|short}]'
2087 b'{date|shortdate} [{rev}:{node|short}]'
2084 b' {author|person}: {desc|firstline}\n'
2088 b' {author|person}: {desc|firstline}\n'
2085 )
2089 )
2086 if testedtemplate is None:
2090 if testedtemplate is None:
2087 testedtemplate = defaulttemplate
2091 testedtemplate = defaulttemplate
2088 displayer = makelogtemplater(nullui, repo, testedtemplate)
2092 displayer = makelogtemplater(nullui, repo, testedtemplate)
2089
2093
2090 def format():
2094 def format():
2091 for r in revs:
2095 for r in revs:
2092 ctx = repo[r]
2096 ctx = repo[r]
2093 displayer.show(ctx)
2097 displayer.show(ctx)
2094 displayer.flush(ctx)
2098 displayer.flush(ctx)
2095
2099
2096 timer, fm = gettimer(ui, opts)
2100 timer, fm = gettimer(ui, opts)
2097 timer(format)
2101 timer(format)
2098 fm.end()
2102 fm.end()
2099
2103
2100
2104
2101 def _displaystats(ui, opts, entries, data):
2105 def _displaystats(ui, opts, entries, data):
2102 # use a second formatter because the data are quite different, not sure
2106 # use a second formatter because the data are quite different, not sure
2103 # how it flies with the templater.
2107 # how it flies with the templater.
2104 fm = ui.formatter(b'perf-stats', opts)
2108 fm = ui.formatter(b'perf-stats', opts)
2105 for key, title in entries:
2109 for key, title in entries:
2106 values = data[key]
2110 values = data[key]
2107 nbvalues = len(data)
2111 nbvalues = len(data)
2108 values.sort()
2112 values.sort()
2109 stats = {
2113 stats = {
2110 'key': key,
2114 'key': key,
2111 'title': title,
2115 'title': title,
2112 'nbitems': len(values),
2116 'nbitems': len(values),
2113 'min': values[0][0],
2117 'min': values[0][0],
2114 '10%': values[(nbvalues * 10) // 100][0],
2118 '10%': values[(nbvalues * 10) // 100][0],
2115 '25%': values[(nbvalues * 25) // 100][0],
2119 '25%': values[(nbvalues * 25) // 100][0],
2116 '50%': values[(nbvalues * 50) // 100][0],
2120 '50%': values[(nbvalues * 50) // 100][0],
2117 '75%': values[(nbvalues * 75) // 100][0],
2121 '75%': values[(nbvalues * 75) // 100][0],
2118 '80%': values[(nbvalues * 80) // 100][0],
2122 '80%': values[(nbvalues * 80) // 100][0],
2119 '85%': values[(nbvalues * 85) // 100][0],
2123 '85%': values[(nbvalues * 85) // 100][0],
2120 '90%': values[(nbvalues * 90) // 100][0],
2124 '90%': values[(nbvalues * 90) // 100][0],
2121 '95%': values[(nbvalues * 95) // 100][0],
2125 '95%': values[(nbvalues * 95) // 100][0],
2122 '99%': values[(nbvalues * 99) // 100][0],
2126 '99%': values[(nbvalues * 99) // 100][0],
2123 'max': values[-1][0],
2127 'max': values[-1][0],
2124 }
2128 }
2125 fm.startitem()
2129 fm.startitem()
2126 fm.data(**stats)
2130 fm.data(**stats)
2127 # make node pretty for the human output
2131 # make node pretty for the human output
2128 fm.plain('### %s (%d items)\n' % (title, len(values)))
2132 fm.plain('### %s (%d items)\n' % (title, len(values)))
2129 lines = [
2133 lines = [
2130 'min',
2134 'min',
2131 '10%',
2135 '10%',
2132 '25%',
2136 '25%',
2133 '50%',
2137 '50%',
2134 '75%',
2138 '75%',
2135 '80%',
2139 '80%',
2136 '85%',
2140 '85%',
2137 '90%',
2141 '90%',
2138 '95%',
2142 '95%',
2139 '99%',
2143 '99%',
2140 'max',
2144 'max',
2141 ]
2145 ]
2142 for l in lines:
2146 for l in lines:
2143 fm.plain('%s: %s\n' % (l, stats[l]))
2147 fm.plain('%s: %s\n' % (l, stats[l]))
2144 fm.end()
2148 fm.end()
2145
2149
2146
2150
2147 @command(
2151 @command(
2148 b'perf::helper-mergecopies|perfhelper-mergecopies',
2152 b'perf::helper-mergecopies|perfhelper-mergecopies',
2149 formatteropts
2153 formatteropts
2150 + [
2154 + [
2151 (b'r', b'revs', [], b'restrict search to these revisions'),
2155 (b'r', b'revs', [], b'restrict search to these revisions'),
2152 (b'', b'timing', False, b'provides extra data (costly)'),
2156 (b'', b'timing', False, b'provides extra data (costly)'),
2153 (b'', b'stats', False, b'provides statistic about the measured data'),
2157 (b'', b'stats', False, b'provides statistic about the measured data'),
2154 ],
2158 ],
2155 )
2159 )
2156 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2160 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2157 """find statistics about potential parameters for `perfmergecopies`
2161 """find statistics about potential parameters for `perfmergecopies`
2158
2162
2159 This command find (base, p1, p2) triplet relevant for copytracing
2163 This command find (base, p1, p2) triplet relevant for copytracing
2160 benchmarking in the context of a merge. It reports values for some of the
2164 benchmarking in the context of a merge. It reports values for some of the
2161 parameters that impact merge copy tracing time during merge.
2165 parameters that impact merge copy tracing time during merge.
2162
2166
2163 If `--timing` is set, rename detection is run and the associated timing
2167 If `--timing` is set, rename detection is run and the associated timing
2164 will be reported. The extra details come at the cost of slower command
2168 will be reported. The extra details come at the cost of slower command
2165 execution.
2169 execution.
2166
2170
2167 Since rename detection is only run once, other factors might easily
2171 Since rename detection is only run once, other factors might easily
2168 affect the precision of the timing. However it should give a good
2172 affect the precision of the timing. However it should give a good
2169 approximation of which revision triplets are very costly.
2173 approximation of which revision triplets are very costly.
2170 """
2174 """
2171 opts = _byteskwargs(opts)
2175 opts = _byteskwargs(opts)
2172 fm = ui.formatter(b'perf', opts)
2176 fm = ui.formatter(b'perf', opts)
2173 dotiming = opts[b'timing']
2177 dotiming = opts[b'timing']
2174 dostats = opts[b'stats']
2178 dostats = opts[b'stats']
2175
2179
2176 output_template = [
2180 output_template = [
2177 ("base", "%(base)12s"),
2181 ("base", "%(base)12s"),
2178 ("p1", "%(p1.node)12s"),
2182 ("p1", "%(p1.node)12s"),
2179 ("p2", "%(p2.node)12s"),
2183 ("p2", "%(p2.node)12s"),
2180 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2184 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2181 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2185 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2182 ("p1.renames", "%(p1.renamedfiles)12d"),
2186 ("p1.renames", "%(p1.renamedfiles)12d"),
2183 ("p1.time", "%(p1.time)12.3f"),
2187 ("p1.time", "%(p1.time)12.3f"),
2184 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2188 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2185 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2189 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2186 ("p2.renames", "%(p2.renamedfiles)12d"),
2190 ("p2.renames", "%(p2.renamedfiles)12d"),
2187 ("p2.time", "%(p2.time)12.3f"),
2191 ("p2.time", "%(p2.time)12.3f"),
2188 ("renames", "%(nbrenamedfiles)12d"),
2192 ("renames", "%(nbrenamedfiles)12d"),
2189 ("total.time", "%(time)12.3f"),
2193 ("total.time", "%(time)12.3f"),
2190 ]
2194 ]
2191 if not dotiming:
2195 if not dotiming:
2192 output_template = [
2196 output_template = [
2193 i
2197 i
2194 for i in output_template
2198 for i in output_template
2195 if not ('time' in i[0] or 'renames' in i[0])
2199 if not ('time' in i[0] or 'renames' in i[0])
2196 ]
2200 ]
2197 header_names = [h for (h, v) in output_template]
2201 header_names = [h for (h, v) in output_template]
2198 output = ' '.join([v for (h, v) in output_template]) + '\n'
2202 output = ' '.join([v for (h, v) in output_template]) + '\n'
2199 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2203 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2200 fm.plain(header % tuple(header_names))
2204 fm.plain(header % tuple(header_names))
2201
2205
2202 if not revs:
2206 if not revs:
2203 revs = ['all()']
2207 revs = ['all()']
2204 revs = scmutil.revrange(repo, revs)
2208 revs = scmutil.revrange(repo, revs)
2205
2209
2206 if dostats:
2210 if dostats:
2207 alldata = {
2211 alldata = {
2208 'nbrevs': [],
2212 'nbrevs': [],
2209 'nbmissingfiles': [],
2213 'nbmissingfiles': [],
2210 }
2214 }
2211 if dotiming:
2215 if dotiming:
2212 alldata['parentnbrenames'] = []
2216 alldata['parentnbrenames'] = []
2213 alldata['totalnbrenames'] = []
2217 alldata['totalnbrenames'] = []
2214 alldata['parenttime'] = []
2218 alldata['parenttime'] = []
2215 alldata['totaltime'] = []
2219 alldata['totaltime'] = []
2216
2220
2217 roi = repo.revs('merge() and %ld', revs)
2221 roi = repo.revs('merge() and %ld', revs)
2218 for r in roi:
2222 for r in roi:
2219 ctx = repo[r]
2223 ctx = repo[r]
2220 p1 = ctx.p1()
2224 p1 = ctx.p1()
2221 p2 = ctx.p2()
2225 p2 = ctx.p2()
2222 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2226 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2223 for b in bases:
2227 for b in bases:
2224 b = repo[b]
2228 b = repo[b]
2225 p1missing = copies._computeforwardmissing(b, p1)
2229 p1missing = copies._computeforwardmissing(b, p1)
2226 p2missing = copies._computeforwardmissing(b, p2)
2230 p2missing = copies._computeforwardmissing(b, p2)
2227 data = {
2231 data = {
2228 b'base': b.hex(),
2232 b'base': b.hex(),
2229 b'p1.node': p1.hex(),
2233 b'p1.node': p1.hex(),
2230 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2234 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2231 b'p1.nbmissingfiles': len(p1missing),
2235 b'p1.nbmissingfiles': len(p1missing),
2232 b'p2.node': p2.hex(),
2236 b'p2.node': p2.hex(),
2233 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2237 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2234 b'p2.nbmissingfiles': len(p2missing),
2238 b'p2.nbmissingfiles': len(p2missing),
2235 }
2239 }
2236 if dostats:
2240 if dostats:
2237 if p1missing:
2241 if p1missing:
2238 alldata['nbrevs'].append(
2242 alldata['nbrevs'].append(
2239 (data['p1.nbrevs'], b.hex(), p1.hex())
2243 (data['p1.nbrevs'], b.hex(), p1.hex())
2240 )
2244 )
2241 alldata['nbmissingfiles'].append(
2245 alldata['nbmissingfiles'].append(
2242 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2246 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2243 )
2247 )
2244 if p2missing:
2248 if p2missing:
2245 alldata['nbrevs'].append(
2249 alldata['nbrevs'].append(
2246 (data['p2.nbrevs'], b.hex(), p2.hex())
2250 (data['p2.nbrevs'], b.hex(), p2.hex())
2247 )
2251 )
2248 alldata['nbmissingfiles'].append(
2252 alldata['nbmissingfiles'].append(
2249 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2253 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2250 )
2254 )
2251 if dotiming:
2255 if dotiming:
2252 begin = util.timer()
2256 begin = util.timer()
2253 mergedata = copies.mergecopies(repo, p1, p2, b)
2257 mergedata = copies.mergecopies(repo, p1, p2, b)
2254 end = util.timer()
2258 end = util.timer()
2255 # not very stable timing since we did only one run
2259 # not very stable timing since we did only one run
2256 data['time'] = end - begin
2260 data['time'] = end - begin
2257 # mergedata contains five dicts: "copy", "movewithdir",
2261 # mergedata contains five dicts: "copy", "movewithdir",
2258 # "diverge", "renamedelete" and "dirmove".
2262 # "diverge", "renamedelete" and "dirmove".
2259 # The first 4 are about renamed file so lets count that.
2263 # The first 4 are about renamed file so lets count that.
2260 renames = len(mergedata[0])
2264 renames = len(mergedata[0])
2261 renames += len(mergedata[1])
2265 renames += len(mergedata[1])
2262 renames += len(mergedata[2])
2266 renames += len(mergedata[2])
2263 renames += len(mergedata[3])
2267 renames += len(mergedata[3])
2264 data['nbrenamedfiles'] = renames
2268 data['nbrenamedfiles'] = renames
2265 begin = util.timer()
2269 begin = util.timer()
2266 p1renames = copies.pathcopies(b, p1)
2270 p1renames = copies.pathcopies(b, p1)
2267 end = util.timer()
2271 end = util.timer()
2268 data['p1.time'] = end - begin
2272 data['p1.time'] = end - begin
2269 begin = util.timer()
2273 begin = util.timer()
2270 p2renames = copies.pathcopies(b, p2)
2274 p2renames = copies.pathcopies(b, p2)
2271 end = util.timer()
2275 end = util.timer()
2272 data['p2.time'] = end - begin
2276 data['p2.time'] = end - begin
2273 data['p1.renamedfiles'] = len(p1renames)
2277 data['p1.renamedfiles'] = len(p1renames)
2274 data['p2.renamedfiles'] = len(p2renames)
2278 data['p2.renamedfiles'] = len(p2renames)
2275
2279
2276 if dostats:
2280 if dostats:
2277 if p1missing:
2281 if p1missing:
2278 alldata['parentnbrenames'].append(
2282 alldata['parentnbrenames'].append(
2279 (data['p1.renamedfiles'], b.hex(), p1.hex())
2283 (data['p1.renamedfiles'], b.hex(), p1.hex())
2280 )
2284 )
2281 alldata['parenttime'].append(
2285 alldata['parenttime'].append(
2282 (data['p1.time'], b.hex(), p1.hex())
2286 (data['p1.time'], b.hex(), p1.hex())
2283 )
2287 )
2284 if p2missing:
2288 if p2missing:
2285 alldata['parentnbrenames'].append(
2289 alldata['parentnbrenames'].append(
2286 (data['p2.renamedfiles'], b.hex(), p2.hex())
2290 (data['p2.renamedfiles'], b.hex(), p2.hex())
2287 )
2291 )
2288 alldata['parenttime'].append(
2292 alldata['parenttime'].append(
2289 (data['p2.time'], b.hex(), p2.hex())
2293 (data['p2.time'], b.hex(), p2.hex())
2290 )
2294 )
2291 if p1missing or p2missing:
2295 if p1missing or p2missing:
2292 alldata['totalnbrenames'].append(
2296 alldata['totalnbrenames'].append(
2293 (
2297 (
2294 data['nbrenamedfiles'],
2298 data['nbrenamedfiles'],
2295 b.hex(),
2299 b.hex(),
2296 p1.hex(),
2300 p1.hex(),
2297 p2.hex(),
2301 p2.hex(),
2298 )
2302 )
2299 )
2303 )
2300 alldata['totaltime'].append(
2304 alldata['totaltime'].append(
2301 (data['time'], b.hex(), p1.hex(), p2.hex())
2305 (data['time'], b.hex(), p1.hex(), p2.hex())
2302 )
2306 )
2303 fm.startitem()
2307 fm.startitem()
2304 fm.data(**data)
2308 fm.data(**data)
2305 # make node pretty for the human output
2309 # make node pretty for the human output
2306 out = data.copy()
2310 out = data.copy()
2307 out['base'] = fm.hexfunc(b.node())
2311 out['base'] = fm.hexfunc(b.node())
2308 out['p1.node'] = fm.hexfunc(p1.node())
2312 out['p1.node'] = fm.hexfunc(p1.node())
2309 out['p2.node'] = fm.hexfunc(p2.node())
2313 out['p2.node'] = fm.hexfunc(p2.node())
2310 fm.plain(output % out)
2314 fm.plain(output % out)
2311
2315
2312 fm.end()
2316 fm.end()
2313 if dostats:
2317 if dostats:
2314 # use a second formatter because the data are quite different, not sure
2318 # use a second formatter because the data are quite different, not sure
2315 # how it flies with the templater.
2319 # how it flies with the templater.
2316 entries = [
2320 entries = [
2317 ('nbrevs', 'number of revision covered'),
2321 ('nbrevs', 'number of revision covered'),
2318 ('nbmissingfiles', 'number of missing files at head'),
2322 ('nbmissingfiles', 'number of missing files at head'),
2319 ]
2323 ]
2320 if dotiming:
2324 if dotiming:
2321 entries.append(
2325 entries.append(
2322 ('parentnbrenames', 'rename from one parent to base')
2326 ('parentnbrenames', 'rename from one parent to base')
2323 )
2327 )
2324 entries.append(('totalnbrenames', 'total number of renames'))
2328 entries.append(('totalnbrenames', 'total number of renames'))
2325 entries.append(('parenttime', 'time for one parent'))
2329 entries.append(('parenttime', 'time for one parent'))
2326 entries.append(('totaltime', 'time for both parents'))
2330 entries.append(('totaltime', 'time for both parents'))
2327 _displaystats(ui, opts, entries, alldata)
2331 _displaystats(ui, opts, entries, alldata)
2328
2332
2329
2333
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE: the default used to be a mutable `[]`; `None` is equivalent here
    # (both fall through the `if not revs` branch below) and avoids the
    # shared-mutable-default pitfall.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # The output layout depends on whether the (costly) timing data was
    # requested: two extra columns in that case.
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions provide interesting copy-tracing pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2468
2472
2469
2473
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # benchmark construction of a case-collision auditor over the dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2476
2480
2477
2481
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # benchmark (re)loading the fncache from the store
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    timer(lambda: store.fncache._load())
    fm.end()
2489
2493
2490
2494
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache to disk

    The fncache is loaded once up front and forcibly marked dirty before
    every timed write so that the write is never skipped.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # fix: the lock used to leak if transaction creation or the timed run
    # raised; release it on every exit path.
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # re-mark dirty each run, otherwise only the first write happens
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2509
2513
2510
2514
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # benchmark path-encoding every entry currently held by the fncache
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def run():
        encode = store.encode  # hoist the attribute lookup out of the loop
        for path in store.fncache.entries:
            encode(path)

    timer(run)
    fm.end()
2524
2528
2525
2529
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop used by the threaded variant of `perfbdiff`.

    Pulls text pairs from ``q`` and diffs them until a ``None`` sentinel is
    seen, then parks on ``ready`` until the driver wakes all workers for the
    next timed run (or sets ``done`` to shut down).
    """
    # select the diff flavor once; the flags cannot change mid-run
    if xdiff:
        diff = mdiff.bdiff.xdiffblocks
    elif blocks:
        diff = mdiff.bdiff.blocks
    else:
        diff = mdiff.textdiff
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            diff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # balance the terminating None
        with ready:
            ready.wait()
2541
2545
2542
2546
def _manifestrevision(repo, mnode):
    """Return the raw revision text for manifest node ``mnode``."""
    ml = repo.manifestlog
    # newer Mercurial exposes getstorage(); older versions only had _revlog
    if util.safehasattr(ml, b'getstorage'):
        storage = ml.getstorage(b'')
    else:
        storage = ml._revlog
    return storage.revision(mnode)
2552
2556
2553
2557
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather every (old, new) text pair up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded mode: workers pull pairs off a queue; a None per worker
        # marks the end of a run, after which they wait on the condition
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set the flag, unblock their q.get(), and
        # wake anyone parked on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2668
2672
2669
2673
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # [bundle-generator, open-transaction]; mutated in place so the
            # setup/apply closures and the cleanup below share state
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # roll back the previous run before re-reading the
                        # bundle, so each timed run starts from a clean state
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # fix: this was `repo.ui.quiet == orig_quiet`, a no-op
                # comparison that left the ui permanently quiet; restore it.
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
2749
2753
2750
2754
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect all (old, new) text pairs first so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2829
2833
2830
2834
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # time a plain diff plus each whitespace-ignoring flag combination
    for flags in ('', 'w', 'b', 'B', 'wB'):
        # distinct name: do not clobber the outer `opts` used by gettimer
        diff_kwargs = {options[flag]: b'1' for flag in flags}

        def d(diff_kwargs=diff_kwargs):  # bind per-iteration value
            ui.pushbuffer()
            commands.diff(ui, repo, **diff_kwargs)
            ui.popbuffer()

        label = flags.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(d, title=title)
    fm.end()
2854
2858
2855
2859
2856 @command(
2860 @command(
2857 b'perf::revlogindex|perfrevlogindex',
2861 b'perf::revlogindex|perfrevlogindex',
2858 revlogopts + formatteropts,
2862 revlogopts + formatteropts,
2859 b'-c|-m|FILE',
2863 b'-c|-m|FILE',
2860 )
2864 )
2861 def perfrevlogindex(ui, repo, file_=None, **opts):
2865 def perfrevlogindex(ui, repo, file_=None, **opts):
2862 """Benchmark operations against a revlog index.
2866 """Benchmark operations against a revlog index.
2863
2867
2864 This tests constructing a revlog instance, reading index data,
2868 This tests constructing a revlog instance, reading index data,
2865 parsing index data, and performing various operations related to
2869 parsing index data, and performing various operations related to
2866 index data.
2870 index data.
2867 """
2871 """
2868
2872
2869 opts = _byteskwargs(opts)
2873 opts = _byteskwargs(opts)
2870
2874
2871 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2875 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2872
2876
2873 opener = getattr(rl, 'opener') # trick linter
2877 opener = getattr(rl, 'opener') # trick linter
2874 # compat with hg <= 5.8
2878 # compat with hg <= 5.8
2875 radix = getattr(rl, 'radix', None)
2879 radix = getattr(rl, 'radix', None)
2876 indexfile = getattr(rl, '_indexfile', None)
2880 indexfile = getattr(rl, '_indexfile', None)
2877 if indexfile is None:
2881 if indexfile is None:
2878 # compatibility with <= hg-5.8
2882 # compatibility with <= hg-5.8
2879 indexfile = getattr(rl, 'indexfile')
2883 indexfile = getattr(rl, 'indexfile')
2880 data = opener.read(indexfile)
2884 data = opener.read(indexfile)
2881
2885
2882 header = struct.unpack(b'>I', data[0:4])[0]
2886 header = struct.unpack(b'>I', data[0:4])[0]
2883 version = header & 0xFFFF
2887 version = header & 0xFFFF
2884 if version == 1:
2888 if version == 1:
2885 inline = header & (1 << 16)
2889 inline = header & (1 << 16)
2886 else:
2890 else:
2887 raise error.Abort(b'unsupported revlog version: %d' % version)
2891 raise error.Abort(b'unsupported revlog version: %d' % version)
2888
2892
2889 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2893 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2890 if parse_index_v1 is None:
2894 if parse_index_v1 is None:
2891 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2895 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2892
2896
2893 rllen = len(rl)
2897 rllen = len(rl)
2894
2898
2895 node0 = rl.node(0)
2899 node0 = rl.node(0)
2896 node25 = rl.node(rllen // 4)
2900 node25 = rl.node(rllen // 4)
2897 node50 = rl.node(rllen // 2)
2901 node50 = rl.node(rllen // 2)
2898 node75 = rl.node(rllen // 4 * 3)
2902 node75 = rl.node(rllen // 4 * 3)
2899 node100 = rl.node(rllen - 1)
2903 node100 = rl.node(rllen - 1)
2900
2904
2901 allrevs = range(rllen)
2905 allrevs = range(rllen)
2902 allrevsrev = list(reversed(allrevs))
2906 allrevsrev = list(reversed(allrevs))
2903 allnodes = [rl.node(rev) for rev in range(rllen)]
2907 allnodes = [rl.node(rev) for rev in range(rllen)]
2904 allnodesrev = list(reversed(allnodes))
2908 allnodesrev = list(reversed(allnodes))
2905
2909
2906 def constructor():
2910 def constructor():
2907 if radix is not None:
2911 if radix is not None:
2908 revlog(opener, radix=radix)
2912 revlog(opener, radix=radix)
2909 else:
2913 else:
2910 # hg <= 5.8
2914 # hg <= 5.8
2911 revlog(opener, indexfile=indexfile)
2915 revlog(opener, indexfile=indexfile)
2912
2916
2913 def read():
2917 def read():
2914 with opener(indexfile) as fh:
2918 with opener(indexfile) as fh:
2915 fh.read()
2919 fh.read()
2916
2920
2917 def parseindex():
2921 def parseindex():
2918 parse_index_v1(data, inline)
2922 parse_index_v1(data, inline)
2919
2923
2920 def getentry(revornode):
2924 def getentry(revornode):
2921 index = parse_index_v1(data, inline)[0]
2925 index = parse_index_v1(data, inline)[0]
2922 index[revornode]
2926 index[revornode]
2923
2927
2924 def getentries(revs, count=1):
2928 def getentries(revs, count=1):
2925 index = parse_index_v1(data, inline)[0]
2929 index = parse_index_v1(data, inline)[0]
2926
2930
2927 for i in range(count):
2931 for i in range(count):
2928 for rev in revs:
2932 for rev in revs:
2929 index[rev]
2933 index[rev]
2930
2934
2931 def resolvenode(node):
2935 def resolvenode(node):
2932 index = parse_index_v1(data, inline)[0]
2936 index = parse_index_v1(data, inline)[0]
2933 rev = getattr(index, 'rev', None)
2937 rev = getattr(index, 'rev', None)
2934 if rev is None:
2938 if rev is None:
2935 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2939 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2936 # This only works for the C code.
2940 # This only works for the C code.
2937 if nodemap is None:
2941 if nodemap is None:
2938 return
2942 return
2939 rev = nodemap.__getitem__
2943 rev = nodemap.__getitem__
2940
2944
2941 try:
2945 try:
2942 rev(node)
2946 rev(node)
2943 except error.RevlogError:
2947 except error.RevlogError:
2944 pass
2948 pass
2945
2949
2946 def resolvenodes(nodes, count=1):
2950 def resolvenodes(nodes, count=1):
2947 index = parse_index_v1(data, inline)[0]
2951 index = parse_index_v1(data, inline)[0]
2948 rev = getattr(index, 'rev', None)
2952 rev = getattr(index, 'rev', None)
2949 if rev is None:
2953 if rev is None:
2950 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2954 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2951 # This only works for the C code.
2955 # This only works for the C code.
2952 if nodemap is None:
2956 if nodemap is None:
2953 return
2957 return
2954 rev = nodemap.__getitem__
2958 rev = nodemap.__getitem__
2955
2959
2956 for i in range(count):
2960 for i in range(count):
2957 for node in nodes:
2961 for node in nodes:
2958 try:
2962 try:
2959 rev(node)
2963 rev(node)
2960 except error.RevlogError:
2964 except error.RevlogError:
2961 pass
2965 pass
2962
2966
2963 benches = [
2967 benches = [
2964 (constructor, b'revlog constructor'),
2968 (constructor, b'revlog constructor'),
2965 (read, b'read'),
2969 (read, b'read'),
2966 (parseindex, b'create index object'),
2970 (parseindex, b'create index object'),
2967 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2971 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2968 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2972 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2969 (lambda: resolvenode(node0), b'look up node at rev 0'),
2973 (lambda: resolvenode(node0), b'look up node at rev 0'),
2970 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2974 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2971 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2975 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2972 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2976 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2973 (lambda: resolvenode(node100), b'look up node at tip'),
2977 (lambda: resolvenode(node100), b'look up node at tip'),
2974 # 2x variation is to measure caching impact.
2978 # 2x variation is to measure caching impact.
2975 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2979 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2976 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2980 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2977 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2981 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2978 (
2982 (
2979 lambda: resolvenodes(allnodesrev, 2),
2983 lambda: resolvenodes(allnodesrev, 2),
2980 b'look up all nodes 2x (reverse)',
2984 b'look up all nodes 2x (reverse)',
2981 ),
2985 ),
2982 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2986 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2983 (
2987 (
2984 lambda: getentries(allrevs, 2),
2988 lambda: getentries(allrevs, 2),
2985 b'retrieve all index entries 2x (forward)',
2989 b'retrieve all index entries 2x (forward)',
2986 ),
2990 ),
2987 (
2991 (
2988 lambda: getentries(allrevsrev),
2992 lambda: getentries(allrevsrev),
2989 b'retrieve all index entries (reverse)',
2993 b'retrieve all index entries (reverse)',
2990 ),
2994 ),
2991 (
2995 (
2992 lambda: getentries(allrevsrev, 2),
2996 lambda: getentries(allrevsrev, 2),
2993 b'retrieve all index entries 2x (reverse)',
2997 b'retrieve all index entries 2x (reverse)',
2994 ),
2998 ),
2995 ]
2999 ]
2996
3000
2997 for fn, title in benches:
3001 for fn, title in benches:
2998 timer, fm = gettimer(ui, opts)
3002 timer, fm = gettimer(ui, opts)
2999 timer(fn, title=title)
3003 timer(fn, title=title)
3000 fm.end()
3004 fm.end()
3001
3005
3002
3006
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def read_revisions():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk from tip down to (and including) startrev
            first, last, step = rllen - 1, startrev - 1, -step
        else:
            first, last = startrev, rllen

        for rev in _xrange(first, last, step):
            # Old revisions don't support passing int.
            read_revisions_node = rl.node(rev)
            rl.revision(read_revisions_node)

    timer, fm = gettimer(ui, opts)
    timer(read_revisions)
    fm.end()
3051
3055
3052
3056
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed message typo ("invalide" -> "invalid")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults holds one [(rev, timing), ...] list per pass; transpose
    # it into [(rev, [timing-pass-1, timing-pass-2, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: was `resultcount * 70 // 100`, which reported the 70th
        # percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3194
3198
3195
3199
3196 class _faketr:
3200 class _faketr:
3197 def add(s, x, y, z=None):
3201 def add(s, x, y, z=None):
3198 return None
3202 return None
3199
3203
3200
3204
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions [startrev, stoprev] of ``orig`` to a truncated
    temporary copy and time each ``addrawrevision`` call.

    Returns a list of ``(rev, timing)`` pairs, one per added revision.

    ``source`` selects how the revision content is provided (see
    ``_getrevisionseed``).  ``runidx``, when given, only labels the
    progress topic for multi-pass runs.  With ``clearcaches`` the
    destination caches are dropped before each addition so every write
    is measured cold.
    """
    timings = []
    # transaction stub: journalling must not be part of the measurement
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # gather the inputs outside of the timed section
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the actual addition is timed
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3250
3254
3251
3255
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair to feed ``addrawrevision``.

    ``source`` selects how the content of ``rev`` is provided: as a full
    text (``full``), as a cached delta against a parent (``parent-1``,
    ``parent-2``, ``parent-smallest``) or as the delta already stored in
    ``orig`` (``storage``).
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)

    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3292
3296
3293
3297
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary revlog that is a copy of ``orig`` truncated to
    ``truncaterev``.

    The index and data files of ``orig`` are copied into a fresh
    temporary directory and truncated so revisions >= ``truncaterev``
    are removed, then a new revlog is opened on the copies.  The
    temporary directory is deleted on exit.  Inline revlogs are
    rejected.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # forward 'upperboundcomp' only when this revlog version exposes it
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # '_datafile' with a fallback to the older 'datafile' attribute
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries have a fixed size; keep 'truncaterev' of them
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            # orig.start(rev) is the data-file offset of that revision
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # hg <= 5.8 takes explicit index/data file names instead of
            # a radix
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3354
3358
3355
3359
3356 @command(
3360 @command(
3357 b'perf::revlogchunks|perfrevlogchunks',
3361 b'perf::revlogchunks|perfrevlogchunks',
3358 revlogopts
3362 revlogopts
3359 + formatteropts
3363 + formatteropts
3360 + [
3364 + [
3361 (b'e', b'engines', b'', b'compression engines to use'),
3365 (b'e', b'engines', b'', b'compression engines to use'),
3362 (b's', b'startrev', 0, b'revision to start at'),
3366 (b's', b'startrev', 0, b'revision to start at'),
3363 ],
3367 ],
3364 b'-c|-m|FILE',
3368 b'-c|-m|FILE',
3365 )
3369 )
3366 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3370 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3367 """Benchmark operations on revlog chunks.
3371 """Benchmark operations on revlog chunks.
3368
3372
3369 Logically, each revlog is a collection of fulltext revisions. However,
3373 Logically, each revlog is a collection of fulltext revisions. However,
3370 stored within each revlog are "chunks" of possibly compressed data. This
3374 stored within each revlog are "chunks" of possibly compressed data. This
3371 data needs to be read and decompressed or compressed and written.
3375 data needs to be read and decompressed or compressed and written.
3372
3376
3373 This command measures the time it takes to read+decompress and recompress
3377 This command measures the time it takes to read+decompress and recompress
3374 chunks in a revlog. It effectively isolates I/O and compression performance.
3378 chunks in a revlog. It effectively isolates I/O and compression performance.
3375 For measurements of higher-level operations like resolving revisions,
3379 For measurements of higher-level operations like resolving revisions,
3376 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3380 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3377 """
3381 """
3378 opts = _byteskwargs(opts)
3382 opts = _byteskwargs(opts)
3379
3383
3380 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3384 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3381
3385
3382 # _chunkraw was renamed to _getsegmentforrevs.
3386 # _chunkraw was renamed to _getsegmentforrevs.
3383 try:
3387 try:
3384 segmentforrevs = rl._getsegmentforrevs
3388 segmentforrevs = rl._getsegmentforrevs
3385 except AttributeError:
3389 except AttributeError:
3386 segmentforrevs = rl._chunkraw
3390 segmentforrevs = rl._chunkraw
3387
3391
3388 # Verify engines argument.
3392 # Verify engines argument.
3389 if engines:
3393 if engines:
3390 engines = {e.strip() for e in engines.split(b',')}
3394 engines = {e.strip() for e in engines.split(b',')}
3391 for engine in engines:
3395 for engine in engines:
3392 try:
3396 try:
3393 util.compressionengines[engine]
3397 util.compressionengines[engine]
3394 except KeyError:
3398 except KeyError:
3395 raise error.Abort(b'unknown compression engine: %s' % engine)
3399 raise error.Abort(b'unknown compression engine: %s' % engine)
3396 else:
3400 else:
3397 engines = []
3401 engines = []
3398 for e in util.compengines:
3402 for e in util.compengines:
3399 engine = util.compengines[e]
3403 engine = util.compengines[e]
3400 try:
3404 try:
3401 if engine.available():
3405 if engine.available():
3402 engine.revlogcompressor().compress(b'dummy')
3406 engine.revlogcompressor().compress(b'dummy')
3403 engines.append(e)
3407 engines.append(e)
3404 except NotImplementedError:
3408 except NotImplementedError:
3405 pass
3409 pass
3406
3410
3407 revs = list(rl.revs(startrev, len(rl) - 1))
3411 revs = list(rl.revs(startrev, len(rl) - 1))
3408
3412
3409 def rlfh(rl):
3413 def rlfh(rl):
3410 if rl._inline:
3414 if rl._inline:
3411 indexfile = getattr(rl, '_indexfile', None)
3415 indexfile = getattr(rl, '_indexfile', None)
3412 if indexfile is None:
3416 if indexfile is None:
3413 # compatibility with <= hg-5.8
3417 # compatibility with <= hg-5.8
3414 indexfile = getattr(rl, 'indexfile')
3418 indexfile = getattr(rl, 'indexfile')
3415 return getsvfs(repo)(indexfile)
3419 return getsvfs(repo)(indexfile)
3416 else:
3420 else:
3417 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3421 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3418 return getsvfs(repo)(datafile)
3422 return getsvfs(repo)(datafile)
3419
3423
3420 def doread():
3424 def doread():
3421 rl.clearcaches()
3425 rl.clearcaches()
3422 for rev in revs:
3426 for rev in revs:
3423 segmentforrevs(rev, rev)
3427 segmentforrevs(rev, rev)
3424
3428
3425 def doreadcachedfh():
3429 def doreadcachedfh():
3426 rl.clearcaches()
3430 rl.clearcaches()
3427 fh = rlfh(rl)
3431 fh = rlfh(rl)
3428 for rev in revs:
3432 for rev in revs:
3429 segmentforrevs(rev, rev, df=fh)
3433 segmentforrevs(rev, rev, df=fh)
3430
3434
3431 def doreadbatch():
3435 def doreadbatch():
3432 rl.clearcaches()
3436 rl.clearcaches()
3433 segmentforrevs(revs[0], revs[-1])
3437 segmentforrevs(revs[0], revs[-1])
3434
3438
3435 def doreadbatchcachedfh():
3439 def doreadbatchcachedfh():
3436 rl.clearcaches()
3440 rl.clearcaches()
3437 fh = rlfh(rl)
3441 fh = rlfh(rl)
3438 segmentforrevs(revs[0], revs[-1], df=fh)
3442 segmentforrevs(revs[0], revs[-1], df=fh)
3439
3443
3440 def dochunk():
3444 def dochunk():
3441 rl.clearcaches()
3445 rl.clearcaches()
3442 fh = rlfh(rl)
3446 fh = rlfh(rl)
3443 for rev in revs:
3447 for rev in revs:
3444 rl._chunk(rev, df=fh)
3448 rl._chunk(rev, df=fh)
3445
3449
3446 chunks = [None]
3450 chunks = [None]
3447
3451
3448 def dochunkbatch():
3452 def dochunkbatch():
3449 rl.clearcaches()
3453 rl.clearcaches()
3450 fh = rlfh(rl)
3454 fh = rlfh(rl)
3451 # Save chunks as a side-effect.
3455 # Save chunks as a side-effect.
3452 chunks[0] = rl._chunks(revs, df=fh)
3456 chunks[0] = rl._chunks(revs, df=fh)
3453
3457
3454 def docompress(compressor):
3458 def docompress(compressor):
3455 rl.clearcaches()
3459 rl.clearcaches()
3456
3460
3457 try:
3461 try:
3458 # Swap in the requested compression engine.
3462 # Swap in the requested compression engine.
3459 oldcompressor = rl._compressor
3463 oldcompressor = rl._compressor
3460 rl._compressor = compressor
3464 rl._compressor = compressor
3461 for chunk in chunks[0]:
3465 for chunk in chunks[0]:
3462 rl.compress(chunk)
3466 rl.compress(chunk)
3463 finally:
3467 finally:
3464 rl._compressor = oldcompressor
3468 rl._compressor = oldcompressor
3465
3469
3466 benches = [
3470 benches = [
3467 (lambda: doread(), b'read'),
3471 (lambda: doread(), b'read'),
3468 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3472 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3469 (lambda: doreadbatch(), b'read batch'),
3473 (lambda: doreadbatch(), b'read batch'),
3470 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3474 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3471 (lambda: dochunk(), b'chunk'),
3475 (lambda: dochunk(), b'chunk'),
3472 (lambda: dochunkbatch(), b'chunk batch'),
3476 (lambda: dochunkbatch(), b'chunk batch'),
3473 ]
3477 ]
3474
3478
3475 for engine in sorted(engines):
3479 for engine in sorted(engines):
3476 compressor = util.compengines[engine].revlogcompressor()
3480 compressor = util.compengines[engine].revlogcompressor()
3477 benches.append(
3481 benches.append(
3478 (
3482 (
3479 functools.partial(docompress, compressor),
3483 functools.partial(docompress, compressor),
3480 b'compress w/ %s' % engine,
3484 b'compress w/ %s' % engine,
3481 )
3485 )
3482 )
3486 )
3483
3487
3484 for fn, title in benches:
3488 for fn, title in benches:
3485 timer, fm = gettimer(ui, opts)
3489 timer, fm = gettimer(ui, opts)
3486 timer(fn, title=title)
3490 timer(fn, title=title)
3487 fm.end()
3491 fm.end()
3488
3492
3489
3493
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # hoist attribute lookups out of the hot loop
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # compatibility with older revlog implementations
            iosize = r._io.size
        mkbuffer = util.buffer

        chunks = []
        addchunk = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for chunkrev in item:
                chunkstart = start(chunkrev)
                if inline:
                    # skip over the interleaved index entries
                    chunkstart += (chunkrev + 1) * iosize
                addchunk(
                    mkbuffer(bits, chunkstart - offset, length(chunkrev))
                )

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # simply exhaust the slicing generator
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older layout kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the intermediate results each phase consumes
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (dorevision, b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        benches.append(
            (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        )

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3634
3638
3635
3639
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on revset execution. Volatile caches hold
    filtering and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def runrevset():
        if clear:
            # drop filtering/obsolescence caches so they are rebuilt
            repo.invalidatevolatilesets()
        # exhaust the iterator; the iteration is what we measure
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(runrevset)
    fm.end()
3667
3671
3668
3672
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makeobsbench(name):
        # build a benchmark closure for one obsolescence set
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return run

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(makeobsbench(name), title=name)

    def makefilterbench(name):
        # build a benchmark closure for one repoview filter
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return run

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(makefilterbench(name), title=name)
    fm.end()
3716
3720
3717
3721
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def makebench(filtername):
        """build a benchmark closure for the given filter name"""
        view = repo if filtername is None else repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions exposed the mapping directly
            filtered = view._branchcaches

        def run():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()

        return run

    # order filters from smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so we only time computation
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = b'unfiltered' if name is None else name
            timer(makebench(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3807
3811
3808
3812
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    box = [None]  # used to pass data between closures

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register throw-away filters describing the two states
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found; build from scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            box[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            box[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3917
3921
3918
3922
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix typo in user-visible help text: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only enumerate the on-disk branchmap cache files and their
    sizes.  Otherwise, time `branchcache.fromfile` on the requested (or
    nearest cached) repoview filter.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just report which filters have a cache file and how large they are
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # fall back to the nearest subset filter that does have a cache
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3977
3981
3978
3982
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def countmarkers():
        # instantiating the obsstore parses all on-disk markers
        return len(obsolete.obsstore(repo, svfs))

    timer(countmarkers)
    fm.end()
3988
3992
3989
3993
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark util.lrucachedict operations

    Runs a series of micro-benchmarks against util.lrucachedict:
    construction, pure lookups, pure insertions, and a randomized mix of
    the two.  When --costlimit is non-zero, the cost-aware variants of
    the benchmarks run instead of the plain ones.

    NOTE: all random key/cost sequences are generated once, up front, so
    every benchmark closure below times only cache operations, not RNG
    calls.  The closures are deliberately defined interleaved with the
    data they capture; in particular ``dogetscost`` closes over ``costs``,
    which is only assigned further down — it is bound by the time the
    closure actually runs.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # Times raw construction cost of the cache object.
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # Cost-aware lookups; entries may have been evicted by the cost
        # limit, hence the KeyError handling.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # Same as doinserts but via __setitem__, to compare the two APIs.
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        # Keys are drawn from [0, 2 * size] so roughly half the gets miss.
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # A non-zero cost limit selects the cost-aware benchmark variants.
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        # A fresh timer/formatter per benchmark keeps results separated.
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
    fm.end()
4144
4148
4145
4149
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # Resolve the ui method under test once, outside the timed section.
    write = getattr(ui, _sysstr(opts[b'write_method']))
    item = opts[b'item']
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    flush_line = opts.get(b'flush_line')
    batch_line = opts.get(b'batch_line')

    if batch_line:
        # Pre-build the full line so only the write call itself is timed.
        line = item * nitems + b'\n'

    def benchmark():
        for _lineno in pycompat.xrange(nlines):
            if batch_line:
                # One call per line.
                write(line)
            else:
                # One call per item, plus the newline.
                for _itemno in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4187
4191
4188
4192
def uisetup(ui):
    """Extension setup hook: add compatibility shims for old Mercurial.

    For "historical portability": when cmdutil.openrevlog exists but
    commands.debugrevlogopts does not, Mercurial is 1.9 (or a79fea6b3e77)
    through 3.7 (or 5606f7d0d063).  On those versions the '--dir' option
    for openrevlog() should cause failure, because it has only been
    available since 3.5 (or 49c583ca48c4).
    """
    has_openrevlog = util.safehasattr(cmdutil, b'openrevlog')
    has_debugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not has_openrevlog or has_debugrevlogopts:
        return

    def checked_openrevlog(orig, repo, cmd, file_, opts):
        # Reject --dir explicitly when the repo lacks dirlog support.
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', checked_openrevlog)
4207
4211
4208
4212
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    def doprogress():
        # Time `total` increments of a single progress bar; the context
        # manager handles creation and completion of the bar.
        with ui.makeprogress(topic, total=total) as progress:
            for _step in _xrange(total):
                progress.increment()

    timer, fm = gettimer(ui, opts)
    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now