##// END OF EJS Templates
perf: add a --update-last flag to perf::tags...
marmoute -
r51833:98a7f325 stable
parent child Browse files
Show More
@@ -1,4530 +1,4568 b''
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median average. If not set only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If the benchmark has been running for <time> seconds, and we have
      performed <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

# profiling support is optional; gettimer() checks for None before using it
try:
    from mercurial import profiling
except ImportError:
    profiling = None
121
121
try:
    from mercurial.revlogutils import constants as revlog_constants

    # modern revlog() requires a (kind, name) pair identifying the revlog
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        """Open a revlog, supplying the mandatory 'kind' argument."""
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        """Open a revlog on Mercurial versions predating the 'kind' argument."""
        return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged (no-op conversion used as a py2/py3 fallback)."""
    return a
140
140
141
141
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # Python 2 fallback. NOTE(review): sys.maxint and xrange do not exist
    # on Python 3, so this branch only works on py2 installs — confirm.
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
165
165
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        # py2 stdlib name; on py3 without mercurial this would fail
        import Queue as queue
175
175
# resolve maketemplater across its historical homes; None when unavailable
try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
185
185
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel that can never equal a real attribute


def safehasattr(thing, attr):
    """hasattr() replacement that accepts a bytes attribute name."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
197
197
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # Fixed: the original compared os.name (a str) to b'nt', which can
    # never match on Python 3. The branch is only reachable on old
    # Pythons anyway (perf_counter exists since 3.3; time.clock was
    # removed in 3.8).
    util.timer = time.clock
else:
    util.timer = time.time
207
207
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)
216
216
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)
235
235
# command table populated by the @command decorator below
cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its alias list."""
    return cmd.split(b"|")
244
244
245
245
# Pick the best available @command decorator, oldest-compatible last.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277
277
278
278
# Declare the perf.* config options when the registrar supports it.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (configitem() there rejects the 'experimental' keyword)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
365
365
366
366
def getlen(ui):
    """Return the length function benchmarks should use.

    When perf.stub is set, return a constant-1 stub so test runs stay
    cheap; otherwise return the builtin len.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
371
371
372
372
class noop:
    """Dummy context manager: does nothing on enter or exit."""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass
381
381
382
382
# shared no-op context manager instance, reused by _timer when no profiler
NOOPCTX = noop()
384
384
385
385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<mincount>'; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508
508
509
509
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* once (after optional *setup*) without any timing.

    Used instead of _timer when perf.stub is set; fm and title are
    accepted only for signature compatibility.
    """
    if setup is not None:
        setup()
    func()
514
514
515
515
@contextlib.contextmanager
def timeone():
    """Time the managed block once.

    Yields an (initially empty) list; on exit a single
    (wall, user, system) seconds tuple is appended to it.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times(): index 0 is user time, index 1 is system time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
526
526
527
527
# list of stop conditions (elapsed time, minimal run count): a benchmark
# stops as soon as one pair has both its time and count thresholds met
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
533
533
534
534
@contextlib.contextmanager
def noop_context():
    """Context manager that does nothing (default context for _timer)."""
    yield
538
538
539
539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* and report timings through formatter *fm*.

    setup    -- optional callable run before each iteration
    context  -- context-manager factory wrapping each iteration
    limits   -- (elapsed-seconds, min-run-count) stop conditions
    prerun   -- number of untimed warm-up iterations
    profiler -- context manager enabled for the first timed iteration only
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # only the first timed iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
582
582
583
583
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter *fm*.

    timings is a non-empty list of (wall, user, system) tuples; it is
    sorted in place and the best entry is always reported. When
    *displayall* is true, max, average and median rows are added.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-'best' rows get a 'role.' prefix on their field names
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
616
616
617
617
618 # utilities for historical portability
618 # utilities for historical portability
619
619
620
620
def getint(ui, section, name, default):
    """Read config option ``section.name`` as an integer.

    Returns ``default`` when the option is unset and raises
    ``error.ConfigError`` when the value does not parse as an integer.

    For "historical portability": ui.configint has been available since
    1.9 (or fa2b596db182), so parse the raw value ourselves instead.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        value = int(raw)
    except ValueError:
        msg = b"%s.%s is not an integer ('%s')" % (section, name, raw)
        raise error.ConfigError(msg)
    return value
633
633
634
634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # snapshot the current value so restore() can undo any set()
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        # set/restore pair operating on the captured obj/name/origvalue
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
671
671
672
672
673 # utilities to examine each internal API changes
673 # utilities to examine each internal API changes
674
674
675
675
def getbranchmapsubsettable():
    """Return the branchmap ``subsettable`` from whichever module hosts it.

    Aborts when it cannot be found in any known location.
    """
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694
694
695
695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    store_vfs = getattr(repo, 'svfs', None)
    if not store_vfs:
        # older repositories expose the store opener as 'sopener'
        return getattr(repo, 'sopener')
    return store_vfs
705
705
706
706
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    working_vfs = getattr(repo, 'vfs', None)
    if not working_vfs:
        # older repositories expose the .hg opener as 'opener'
        return getattr(repo, 'opener')
    return working_vfs
716
716
717
717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746
746
747
747
748 # utilities to clear cache
748 # utilities to clear cache
749
749
750
750
def clearfilecache(obj, attrname):
    """Drop the filecache-backed property ``attrname`` from ``obj``.

    Operates on the unfiltered repository when ``obj`` supports it, so the
    cached value is removed where it is actually stored.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    # remove the materialized value (if any), then the filecache entry
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
758
758
759
759
def clearchangelog(repo):
    """Force the changelog to be reloaded from disk on next access."""
    if repo is not repo.unfiltered():
        # filtered repos also keep a cached changelog + key; reset both so
        # the repoview does not hand back a stale object
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
765
765
766
766
767 # perf commands
767 # perf commands
768
768
769
769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for files matching PATS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # consume the walk generator fully so the whole traversal is timed
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
783
783
784
784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    # annotate(True) follows copies/renames; len() forces full evaluation
    timer(lambda: len(fc.annotate(True)))
    fm.end()
792
792
793
793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the low-level dirstate.status() call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # force evaluation of all status categories
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            # newer API: status must run under the running_status context;
            # invalidate so each timed run starts from a cold dirstate
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835
835
836
836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the full working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        # silence per-file output so printing does not dominate the timing
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # the 'uipathfn' argument was added to scmutil.addremove later;
        # probe the signature to stay compatible with older Mercurial
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854
854
855
855
def clearcaches(cl):
    """Clear the in-memory caches of changelog/revlog ``cl``."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        # reset the node->rev cache to its initial (null-only) state
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866
866
867
867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # setup: drop revlog caches so every run recomputes from scratch
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
883
883
884
884
def _default_clear_on_disk_tags_cache(repo):
    """Fallback used when mercurial.tags has no clear_cache_on_disk helper;
    removes the on-disk tags cache file for this repoview."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))
889
889
890
890
def _default_clear_on_disk_tags_fnodes_cache(repo):
    """Fallback used when mercurial.tags has no clear_cache_fnodes helper;
    removes the on-disk hgtags fnodes cache file."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)
895
895
896
896
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    # all-0xff sentinels mark a record as "unknown" in the fnodes cache
    missing_1 = b'\xff' * 4
    missing_2 = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for r in revs:
        # overwrite each revision's fixed-size record in place
        cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
    cache.write()
908
908
909
909
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE),',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE),',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # prefer the helpers provided by mercurial.tags when they exist, and
    # fall back to our local reimplementations on older Mercurial
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    clear_fnodes_fn = getattr(
        tags,
        "clear_cache_fnodes",
        _default_clear_on_disk_tags_fnodes_cache,
    )
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        # BUG FIX: the original called list.extends(), which does not
        # exist; Python lists only have extend().
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        # simulate "hg update" over the last N revisions: warm the tags
        # cache for a view that excludes them, then restore that cache
        # file before each run so the timed call has to catch up.
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        # setup phase, run before each timed call
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
991
1029
992
1030
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the lazy ancestors generator
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
1005
1043
1006
1044
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor-set membership tests for revisions in REVSET"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test only; the result is intentionally discarded
            rev in s

    timer(d)
    fm.end()
1021
1059
1022
1060
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # two calling conventions: "REV" alone (with -c/-m), or "FILE REV"
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that would have been passed when the
    # revision was originally stored
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1086
1124
1087
1125
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # repos[1] is (re)filled by the setup function before each run
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve PATH with whichever API this Mercurial version provides,
    # from newest to oldest
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # fresh peer per run so connection state is not reused
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1114
1152
1115
1153
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # setup: drop the cached bookmarks (and optionally the changelog)
        # so each run re-parses from disk
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
1140
1178
1141
1179
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # parsebundlespec moved between modules across Mercurial versions;
    # fall back for older installs.
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fix: message previously read "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we benchmark bundle generation, not disk I/O
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1245
1283
1246
1284
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark factories: each returns a zero-argument callable for timer()

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw stdio reads serve as a baseline for the bundle-aware readers
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the applicable benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1371
1409
1372
1410
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # consume the chunk generator fully; we only care about the cost
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1408
1446
1409
1447
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache

    The `_dirs` cache is dropped after every call, so each iteration
    measures a cold lookup (i.e. the cost of rebuilding the cache).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate itself so only the dirs cache is measured
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        try:
            del dirstate._map._dirs
        except AttributeError:
            # some dirstate implementations have no `_dirs` attribute
            pass

    timer(d)
    fm.end()
1426
1464
1427
1465
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so initial load cost does not leak into setup
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1490
1528
1491
1529
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before measuring
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the dirs cache so each run starts cold
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1510
1548
1511
1549
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the map before measuring
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1531
1569
1532
1570
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the map before measuring
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        # dirfoldmap is derived from _dirs; drop that too when present
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1556
1594
1557
1595
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate before measuring
    b"a" in ds

    def setup():
        # force the next write() to actually hit disk
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1575
1613
1576
1614
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1598
1636
1599
1637
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1631
1669
1632
1670
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1655
1693
1656
1694
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints up front so only pathcopies() is timed
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1670
1708
1671
1709
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, also pay the cost of re-reading the phase file
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1696
1734
1697
1735
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer Mercurial exposes push variants on the path object
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1760
1798
1761
1799
1762 @command(
1800 @command(
1763 b'perf::manifest|perfmanifest',
1801 b'perf::manifest|perfmanifest',
1764 [
1802 [
1765 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1803 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1766 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1804 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1767 ]
1805 ]
1768 + formatteropts,
1806 + formatteropts,
1769 b'REV|NODE',
1807 b'REV|NODE',
1770 )
1808 )
1771 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1809 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1772 """benchmark the time to read a manifest from disk and return a usable
1810 """benchmark the time to read a manifest from disk and return a usable
1773 dict-like object
1811 dict-like object
1774
1812
1775 Manifest caches are cleared before retrieval."""
1813 Manifest caches are cleared before retrieval."""
1776 opts = _byteskwargs(opts)
1814 opts = _byteskwargs(opts)
1777 timer, fm = gettimer(ui, opts)
1815 timer, fm = gettimer(ui, opts)
1778 if not manifest_rev:
1816 if not manifest_rev:
1779 ctx = scmutil.revsingle(repo, rev, rev)
1817 ctx = scmutil.revsingle(repo, rev, rev)
1780 t = ctx.manifestnode()
1818 t = ctx.manifestnode()
1781 else:
1819 else:
1782 from mercurial.node import bin
1820 from mercurial.node import bin
1783
1821
1784 if len(rev) == 40:
1822 if len(rev) == 40:
1785 t = bin(rev)
1823 t = bin(rev)
1786 else:
1824 else:
1787 try:
1825 try:
1788 rev = int(rev)
1826 rev = int(rev)
1789
1827
1790 if util.safehasattr(repo.manifestlog, b'getstorage'):
1828 if util.safehasattr(repo.manifestlog, b'getstorage'):
1791 t = repo.manifestlog.getstorage(b'').node(rev)
1829 t = repo.manifestlog.getstorage(b'').node(rev)
1792 else:
1830 else:
1793 t = repo.manifestlog._revlog.lookup(rev)
1831 t = repo.manifestlog._revlog.lookup(rev)
1794 except ValueError:
1832 except ValueError:
1795 raise error.Abort(
1833 raise error.Abort(
1796 b'manifest revision must be integer or full node'
1834 b'manifest revision must be integer or full node'
1797 )
1835 )
1798
1836
1799 def d():
1837 def d():
1800 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1838 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1801 repo.manifestlog[t].read()
1839 repo.manifestlog[t].read()
1802
1840
1803 timer(d)
1841 timer(d)
1804 fm.end()
1842 fm.end()
1805
1843
1806
1844
1807 @command(b'perf::changeset|perfchangeset', formatteropts)
1845 @command(b'perf::changeset|perfchangeset', formatteropts)
1808 def perfchangeset(ui, repo, rev, **opts):
1846 def perfchangeset(ui, repo, rev, **opts):
1809 opts = _byteskwargs(opts)
1847 opts = _byteskwargs(opts)
1810 timer, fm = gettimer(ui, opts)
1848 timer, fm = gettimer(ui, opts)
1811 n = scmutil.revsingle(repo, rev).node()
1849 n = scmutil.revsingle(repo, rev).node()
1812
1850
1813 def d():
1851 def d():
1814 repo.changelog.read(n)
1852 repo.changelog.read(n)
1815 # repo.changelog._cache = None
1853 # repo.changelog._cache = None
1816
1854
1817 timer(d)
1855 timer(d)
1818 fm.end()
1856 fm.end()
1819
1857
1820
1858
1821 @command(b'perf::ignore|perfignore', formatteropts)
1859 @command(b'perf::ignore|perfignore', formatteropts)
1822 def perfignore(ui, repo, **opts):
1860 def perfignore(ui, repo, **opts):
1823 """benchmark operation related to computing ignore"""
1861 """benchmark operation related to computing ignore"""
1824 opts = _byteskwargs(opts)
1862 opts = _byteskwargs(opts)
1825 timer, fm = gettimer(ui, opts)
1863 timer, fm = gettimer(ui, opts)
1826 dirstate = repo.dirstate
1864 dirstate = repo.dirstate
1827
1865
1828 def setupone():
1866 def setupone():
1829 dirstate.invalidate()
1867 dirstate.invalidate()
1830 clearfilecache(dirstate, b'_ignore')
1868 clearfilecache(dirstate, b'_ignore')
1831
1869
1832 def runone():
1870 def runone():
1833 dirstate._ignore
1871 dirstate._ignore
1834
1872
1835 timer(runone, setup=setupone, title=b"load")
1873 timer(runone, setup=setupone, title=b"load")
1836 fm.end()
1874 fm.end()
1837
1875
1838
1876
1839 @command(
1877 @command(
1840 b'perf::index|perfindex',
1878 b'perf::index|perfindex',
1841 [
1879 [
1842 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1880 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1843 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1881 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1844 ]
1882 ]
1845 + formatteropts,
1883 + formatteropts,
1846 )
1884 )
1847 def perfindex(ui, repo, **opts):
1885 def perfindex(ui, repo, **opts):
1848 """benchmark index creation time followed by a lookup
1886 """benchmark index creation time followed by a lookup
1849
1887
1850 The default is to look `tip` up. Depending on the index implementation,
1888 The default is to look `tip` up. Depending on the index implementation,
1851 the revision looked up can matters. For example, an implementation
1889 the revision looked up can matters. For example, an implementation
1852 scanning the index will have a faster lookup time for `--rev tip` than for
1890 scanning the index will have a faster lookup time for `--rev tip` than for
1853 `--rev 0`. The number of looked up revisions and their order can also
1891 `--rev 0`. The number of looked up revisions and their order can also
1854 matters.
1892 matters.
1855
1893
1856 Example of useful set to test:
1894 Example of useful set to test:
1857
1895
1858 * tip
1896 * tip
1859 * 0
1897 * 0
1860 * -10:
1898 * -10:
1861 * :10
1899 * :10
1862 * -10: + :10
1900 * -10: + :10
1863 * :10: + -10:
1901 * :10: + -10:
1864 * -10000:
1902 * -10000:
1865 * -10000: + 0
1903 * -10000: + 0
1866
1904
1867 It is not currently possible to check for lookup of a missing node. For
1905 It is not currently possible to check for lookup of a missing node. For
1868 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1906 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1869 import mercurial.revlog
1907 import mercurial.revlog
1870
1908
1871 opts = _byteskwargs(opts)
1909 opts = _byteskwargs(opts)
1872 timer, fm = gettimer(ui, opts)
1910 timer, fm = gettimer(ui, opts)
1873 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1911 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1874 if opts[b'no_lookup']:
1912 if opts[b'no_lookup']:
1875 if opts['rev']:
1913 if opts['rev']:
1876 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1914 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1877 nodes = []
1915 nodes = []
1878 elif not opts[b'rev']:
1916 elif not opts[b'rev']:
1879 nodes = [repo[b"tip"].node()]
1917 nodes = [repo[b"tip"].node()]
1880 else:
1918 else:
1881 revs = scmutil.revrange(repo, opts[b'rev'])
1919 revs = scmutil.revrange(repo, opts[b'rev'])
1882 cl = repo.changelog
1920 cl = repo.changelog
1883 nodes = [cl.node(r) for r in revs]
1921 nodes = [cl.node(r) for r in revs]
1884
1922
1885 unfi = repo.unfiltered()
1923 unfi = repo.unfiltered()
1886 # find the filecache func directly
1924 # find the filecache func directly
1887 # This avoid polluting the benchmark with the filecache logic
1925 # This avoid polluting the benchmark with the filecache logic
1888 makecl = unfi.__class__.changelog.func
1926 makecl = unfi.__class__.changelog.func
1889
1927
1890 def setup():
1928 def setup():
1891 # probably not necessary, but for good measure
1929 # probably not necessary, but for good measure
1892 clearchangelog(unfi)
1930 clearchangelog(unfi)
1893
1931
1894 def d():
1932 def d():
1895 cl = makecl(unfi)
1933 cl = makecl(unfi)
1896 for n in nodes:
1934 for n in nodes:
1897 cl.rev(n)
1935 cl.rev(n)
1898
1936
1899 timer(d, setup=setup)
1937 timer(d, setup=setup)
1900 fm.end()
1938 fm.end()
1901
1939
1902
1940
1903 @command(
1941 @command(
1904 b'perf::nodemap|perfnodemap',
1942 b'perf::nodemap|perfnodemap',
1905 [
1943 [
1906 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1944 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1907 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1945 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1908 ]
1946 ]
1909 + formatteropts,
1947 + formatteropts,
1910 )
1948 )
1911 def perfnodemap(ui, repo, **opts):
1949 def perfnodemap(ui, repo, **opts):
1912 """benchmark the time necessary to look up revision from a cold nodemap
1950 """benchmark the time necessary to look up revision from a cold nodemap
1913
1951
1914 Depending on the implementation, the amount and order of revision we look
1952 Depending on the implementation, the amount and order of revision we look
1915 up can varies. Example of useful set to test:
1953 up can varies. Example of useful set to test:
1916 * tip
1954 * tip
1917 * 0
1955 * 0
1918 * -10:
1956 * -10:
1919 * :10
1957 * :10
1920 * -10: + :10
1958 * -10: + :10
1921 * :10: + -10:
1959 * :10: + -10:
1922 * -10000:
1960 * -10000:
1923 * -10000: + 0
1961 * -10000: + 0
1924
1962
1925 The command currently focus on valid binary lookup. Benchmarking for
1963 The command currently focus on valid binary lookup. Benchmarking for
1926 hexlookup, prefix lookup and missing lookup would also be valuable.
1964 hexlookup, prefix lookup and missing lookup would also be valuable.
1927 """
1965 """
1928 import mercurial.revlog
1966 import mercurial.revlog
1929
1967
1930 opts = _byteskwargs(opts)
1968 opts = _byteskwargs(opts)
1931 timer, fm = gettimer(ui, opts)
1969 timer, fm = gettimer(ui, opts)
1932 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1970 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1933
1971
1934 unfi = repo.unfiltered()
1972 unfi = repo.unfiltered()
1935 clearcaches = opts[b'clear_caches']
1973 clearcaches = opts[b'clear_caches']
1936 # find the filecache func directly
1974 # find the filecache func directly
1937 # This avoid polluting the benchmark with the filecache logic
1975 # This avoid polluting the benchmark with the filecache logic
1938 makecl = unfi.__class__.changelog.func
1976 makecl = unfi.__class__.changelog.func
1939 if not opts[b'rev']:
1977 if not opts[b'rev']:
1940 raise error.Abort(b'use --rev to specify revisions to look up')
1978 raise error.Abort(b'use --rev to specify revisions to look up')
1941 revs = scmutil.revrange(repo, opts[b'rev'])
1979 revs = scmutil.revrange(repo, opts[b'rev'])
1942 cl = repo.changelog
1980 cl = repo.changelog
1943 nodes = [cl.node(r) for r in revs]
1981 nodes = [cl.node(r) for r in revs]
1944
1982
1945 # use a list to pass reference to a nodemap from one closure to the next
1983 # use a list to pass reference to a nodemap from one closure to the next
1946 nodeget = [None]
1984 nodeget = [None]
1947
1985
1948 def setnodeget():
1986 def setnodeget():
1949 # probably not necessary, but for good measure
1987 # probably not necessary, but for good measure
1950 clearchangelog(unfi)
1988 clearchangelog(unfi)
1951 cl = makecl(unfi)
1989 cl = makecl(unfi)
1952 if util.safehasattr(cl.index, 'get_rev'):
1990 if util.safehasattr(cl.index, 'get_rev'):
1953 nodeget[0] = cl.index.get_rev
1991 nodeget[0] = cl.index.get_rev
1954 else:
1992 else:
1955 nodeget[0] = cl.nodemap.get
1993 nodeget[0] = cl.nodemap.get
1956
1994
1957 def d():
1995 def d():
1958 get = nodeget[0]
1996 get = nodeget[0]
1959 for n in nodes:
1997 for n in nodes:
1960 get(n)
1998 get(n)
1961
1999
1962 setup = None
2000 setup = None
1963 if clearcaches:
2001 if clearcaches:
1964
2002
1965 def setup():
2003 def setup():
1966 setnodeget()
2004 setnodeget()
1967
2005
1968 else:
2006 else:
1969 setnodeget()
2007 setnodeget()
1970 d() # prewarm the data structure
2008 d() # prewarm the data structure
1971 timer(d, setup=setup)
2009 timer(d, setup=setup)
1972 fm.end()
2010 fm.end()
1973
2011
1974
2012
1975 @command(b'perf::startup|perfstartup', formatteropts)
2013 @command(b'perf::startup|perfstartup', formatteropts)
1976 def perfstartup(ui, repo, **opts):
2014 def perfstartup(ui, repo, **opts):
1977 opts = _byteskwargs(opts)
2015 opts = _byteskwargs(opts)
1978 timer, fm = gettimer(ui, opts)
2016 timer, fm = gettimer(ui, opts)
1979
2017
1980 def d():
2018 def d():
1981 if os.name != 'nt':
2019 if os.name != 'nt':
1982 os.system(
2020 os.system(
1983 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
2021 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1984 )
2022 )
1985 else:
2023 else:
1986 os.environ['HGRCPATH'] = r' '
2024 os.environ['HGRCPATH'] = r' '
1987 os.system("%s version -q > NUL" % sys.argv[0])
2025 os.system("%s version -q > NUL" % sys.argv[0])
1988
2026
1989 timer(d)
2027 timer(d)
1990 fm.end()
2028 fm.end()
1991
2029
1992
2030
1993 def _find_stream_generator(version):
2031 def _find_stream_generator(version):
1994 """find the proper generator function for this stream version"""
2032 """find the proper generator function for this stream version"""
1995 import mercurial.streamclone
2033 import mercurial.streamclone
1996
2034
1997 available = {}
2035 available = {}
1998
2036
1999 # try to fetch a v1 generator
2037 # try to fetch a v1 generator
2000 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
2038 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
2001 if generatev1 is not None:
2039 if generatev1 is not None:
2002
2040
2003 def generate(repo):
2041 def generate(repo):
2004 entries, bytes, data = generatev2(repo, None, None, True)
2042 entries, bytes, data = generatev2(repo, None, None, True)
2005 return data
2043 return data
2006
2044
2007 available[b'v1'] = generatev1
2045 available[b'v1'] = generatev1
2008 # try to fetch a v2 generator
2046 # try to fetch a v2 generator
2009 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
2047 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
2010 if generatev2 is not None:
2048 if generatev2 is not None:
2011
2049
2012 def generate(repo):
2050 def generate(repo):
2013 entries, bytes, data = generatev2(repo, None, None, True)
2051 entries, bytes, data = generatev2(repo, None, None, True)
2014 return data
2052 return data
2015
2053
2016 available[b'v2'] = generate
2054 available[b'v2'] = generate
2017 # try to fetch a v3 generator
2055 # try to fetch a v3 generator
2018 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
2056 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
2019 if generatev3 is not None:
2057 if generatev3 is not None:
2020
2058
2021 def generate(repo):
2059 def generate(repo):
2022 entries, bytes, data = generatev3(repo, None, None, True)
2060 entries, bytes, data = generatev3(repo, None, None, True)
2023 return data
2061 return data
2024
2062
2025 available[b'v3-exp'] = generate
2063 available[b'v3-exp'] = generate
2026
2064
2027 # resolve the request
2065 # resolve the request
2028 if version == b"latest":
2066 if version == b"latest":
2029 # latest is the highest non experimental version
2067 # latest is the highest non experimental version
2030 latest_key = max(v for v in available if b'-exp' not in v)
2068 latest_key = max(v for v in available if b'-exp' not in v)
2031 return available[latest_key]
2069 return available[latest_key]
2032 elif version in available:
2070 elif version in available:
2033 return available[version]
2071 return available[version]
2034 else:
2072 else:
2035 msg = b"unkown or unavailable version: %s"
2073 msg = b"unkown or unavailable version: %s"
2036 msg %= version
2074 msg %= version
2037 hint = b"available versions: %s"
2075 hint = b"available versions: %s"
2038 hint %= b', '.join(sorted(available))
2076 hint %= b', '.join(sorted(available))
2039 raise error.Abort(msg, hint=hint)
2077 raise error.Abort(msg, hint=hint)
2040
2078
2041
2079
2042 @command(
2080 @command(
2043 b'perf::stream-locked-section',
2081 b'perf::stream-locked-section',
2044 [
2082 [
2045 (
2083 (
2046 b'',
2084 b'',
2047 b'stream-version',
2085 b'stream-version',
2048 b'latest',
2086 b'latest',
2049 b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
2087 b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
2050 ),
2088 ),
2051 ]
2089 ]
2052 + formatteropts,
2090 + formatteropts,
2053 )
2091 )
2054 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
2092 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
2055 """benchmark the initial, repo-locked, section of a stream-clone"""
2093 """benchmark the initial, repo-locked, section of a stream-clone"""
2056
2094
2057 opts = _byteskwargs(opts)
2095 opts = _byteskwargs(opts)
2058 timer, fm = gettimer(ui, opts)
2096 timer, fm = gettimer(ui, opts)
2059
2097
2060 # deletion of the generator may trigger some cleanup that we do not want to
2098 # deletion of the generator may trigger some cleanup that we do not want to
2061 # measure
2099 # measure
2062 result_holder = [None]
2100 result_holder = [None]
2063
2101
2064 def setupone():
2102 def setupone():
2065 result_holder[0] = None
2103 result_holder[0] = None
2066
2104
2067 generate = _find_stream_generator(stream_version)
2105 generate = _find_stream_generator(stream_version)
2068
2106
2069 def runone():
2107 def runone():
2070 # the lock is held for the duration the initialisation
2108 # the lock is held for the duration the initialisation
2071 result_holder[0] = generate(repo)
2109 result_holder[0] = generate(repo)
2072
2110
2073 timer(runone, setup=setupone, title=b"load")
2111 timer(runone, setup=setupone, title=b"load")
2074 fm.end()
2112 fm.end()
2075
2113
2076
2114
2077 @command(
2115 @command(
2078 b'perf::stream-generate',
2116 b'perf::stream-generate',
2079 [
2117 [
2080 (
2118 (
2081 b'',
2119 b'',
2082 b'stream-version',
2120 b'stream-version',
2083 b'latest',
2121 b'latest',
2084 b'stream version to us ("v1", "v2" or "latest", (the default))',
2122 b'stream version to us ("v1", "v2" or "latest", (the default))',
2085 ),
2123 ),
2086 ]
2124 ]
2087 + formatteropts,
2125 + formatteropts,
2088 )
2126 )
2089 def perf_stream_clone_generate(ui, repo, stream_version, **opts):
2127 def perf_stream_clone_generate(ui, repo, stream_version, **opts):
2090 """benchmark the full generation of a stream clone"""
2128 """benchmark the full generation of a stream clone"""
2091
2129
2092 opts = _byteskwargs(opts)
2130 opts = _byteskwargs(opts)
2093 timer, fm = gettimer(ui, opts)
2131 timer, fm = gettimer(ui, opts)
2094
2132
2095 # deletion of the generator may trigger some cleanup that we do not want to
2133 # deletion of the generator may trigger some cleanup that we do not want to
2096 # measure
2134 # measure
2097
2135
2098 generate = _find_stream_generator(stream_version)
2136 generate = _find_stream_generator(stream_version)
2099
2137
2100 def runone():
2138 def runone():
2101 # the lock is held for the duration the initialisation
2139 # the lock is held for the duration the initialisation
2102 for chunk in generate(repo):
2140 for chunk in generate(repo):
2103 pass
2141 pass
2104
2142
2105 timer(runone, title=b"generate")
2143 timer(runone, title=b"generate")
2106 fm.end()
2144 fm.end()
2107
2145
2108
2146
2109 @command(
2147 @command(
2110 b'perf::stream-consume',
2148 b'perf::stream-consume',
2111 formatteropts,
2149 formatteropts,
2112 )
2150 )
2113 def perf_stream_clone_consume(ui, repo, filename, **opts):
2151 def perf_stream_clone_consume(ui, repo, filename, **opts):
2114 """benchmark the full application of a stream clone
2152 """benchmark the full application of a stream clone
2115
2153
2116 This include the creation of the repository
2154 This include the creation of the repository
2117 """
2155 """
2118 # try except to appease check code
2156 # try except to appease check code
2119 msg = b"mercurial too old, missing necessary module: %s"
2157 msg = b"mercurial too old, missing necessary module: %s"
2120 try:
2158 try:
2121 from mercurial import bundle2
2159 from mercurial import bundle2
2122 except ImportError as exc:
2160 except ImportError as exc:
2123 msg %= _bytestr(exc)
2161 msg %= _bytestr(exc)
2124 raise error.Abort(msg)
2162 raise error.Abort(msg)
2125 try:
2163 try:
2126 from mercurial import exchange
2164 from mercurial import exchange
2127 except ImportError as exc:
2165 except ImportError as exc:
2128 msg %= _bytestr(exc)
2166 msg %= _bytestr(exc)
2129 raise error.Abort(msg)
2167 raise error.Abort(msg)
2130 try:
2168 try:
2131 from mercurial import hg
2169 from mercurial import hg
2132 except ImportError as exc:
2170 except ImportError as exc:
2133 msg %= _bytestr(exc)
2171 msg %= _bytestr(exc)
2134 raise error.Abort(msg)
2172 raise error.Abort(msg)
2135 try:
2173 try:
2136 from mercurial import localrepo
2174 from mercurial import localrepo
2137 except ImportError as exc:
2175 except ImportError as exc:
2138 msg %= _bytestr(exc)
2176 msg %= _bytestr(exc)
2139 raise error.Abort(msg)
2177 raise error.Abort(msg)
2140
2178
2141 opts = _byteskwargs(opts)
2179 opts = _byteskwargs(opts)
2142 timer, fm = gettimer(ui, opts)
2180 timer, fm = gettimer(ui, opts)
2143
2181
2144 # deletion of the generator may trigger some cleanup that we do not want to
2182 # deletion of the generator may trigger some cleanup that we do not want to
2145 # measure
2183 # measure
2146 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2184 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2147 raise error.Abort("not a readable file: %s" % filename)
2185 raise error.Abort("not a readable file: %s" % filename)
2148
2186
2149 run_variables = [None, None]
2187 run_variables = [None, None]
2150
2188
2151 @contextlib.contextmanager
2189 @contextlib.contextmanager
2152 def context():
2190 def context():
2153 with open(filename, mode='rb') as bundle:
2191 with open(filename, mode='rb') as bundle:
2154 with tempfile.TemporaryDirectory() as tmp_dir:
2192 with tempfile.TemporaryDirectory() as tmp_dir:
2155 tmp_dir = fsencode(tmp_dir)
2193 tmp_dir = fsencode(tmp_dir)
2156 run_variables[0] = bundle
2194 run_variables[0] = bundle
2157 run_variables[1] = tmp_dir
2195 run_variables[1] = tmp_dir
2158 yield
2196 yield
2159 run_variables[0] = None
2197 run_variables[0] = None
2160 run_variables[1] = None
2198 run_variables[1] = None
2161
2199
2162 def runone():
2200 def runone():
2163 bundle = run_variables[0]
2201 bundle = run_variables[0]
2164 tmp_dir = run_variables[1]
2202 tmp_dir = run_variables[1]
2165 # only pass ui when no srcrepo
2203 # only pass ui when no srcrepo
2166 localrepo.createrepository(
2204 localrepo.createrepository(
2167 repo.ui, tmp_dir, requirements=repo.requirements
2205 repo.ui, tmp_dir, requirements=repo.requirements
2168 )
2206 )
2169 target = hg.repository(repo.ui, tmp_dir)
2207 target = hg.repository(repo.ui, tmp_dir)
2170 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2208 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2171 # stream v1
2209 # stream v1
2172 if util.safehasattr(gen, 'apply'):
2210 if util.safehasattr(gen, 'apply'):
2173 gen.apply(target)
2211 gen.apply(target)
2174 else:
2212 else:
2175 with target.transaction(b"perf::stream-consume") as tr:
2213 with target.transaction(b"perf::stream-consume") as tr:
2176 bundle2.applybundle(
2214 bundle2.applybundle(
2177 target,
2215 target,
2178 gen,
2216 gen,
2179 tr,
2217 tr,
2180 source=b'unbundle',
2218 source=b'unbundle',
2181 url=filename,
2219 url=filename,
2182 )
2220 )
2183
2221
2184 timer(runone, context=context, title=b"consume")
2222 timer(runone, context=context, title=b"consume")
2185 fm.end()
2223 fm.end()
2186
2224
2187
2225
2188 @command(b'perf::parents|perfparents', formatteropts)
2226 @command(b'perf::parents|perfparents', formatteropts)
2189 def perfparents(ui, repo, **opts):
2227 def perfparents(ui, repo, **opts):
2190 """benchmark the time necessary to fetch one changeset's parents.
2228 """benchmark the time necessary to fetch one changeset's parents.
2191
2229
2192 The fetch is done using the `node identifier`, traversing all object layers
2230 The fetch is done using the `node identifier`, traversing all object layers
2193 from the repository object. The first N revisions will be used for this
2231 from the repository object. The first N revisions will be used for this
2194 benchmark. N is controlled by the ``perf.parentscount`` config option
2232 benchmark. N is controlled by the ``perf.parentscount`` config option
2195 (default: 1000).
2233 (default: 1000).
2196 """
2234 """
2197 opts = _byteskwargs(opts)
2235 opts = _byteskwargs(opts)
2198 timer, fm = gettimer(ui, opts)
2236 timer, fm = gettimer(ui, opts)
2199 # control the number of commits perfparents iterates over
2237 # control the number of commits perfparents iterates over
2200 # experimental config: perf.parentscount
2238 # experimental config: perf.parentscount
2201 count = getint(ui, b"perf", b"parentscount", 1000)
2239 count = getint(ui, b"perf", b"parentscount", 1000)
2202 if len(repo.changelog) < count:
2240 if len(repo.changelog) < count:
2203 raise error.Abort(b"repo needs %d commits for this test" % count)
2241 raise error.Abort(b"repo needs %d commits for this test" % count)
2204 repo = repo.unfiltered()
2242 repo = repo.unfiltered()
2205 nl = [repo.changelog.node(i) for i in _xrange(count)]
2243 nl = [repo.changelog.node(i) for i in _xrange(count)]
2206
2244
2207 def d():
2245 def d():
2208 for n in nl:
2246 for n in nl:
2209 repo.changelog.parents(n)
2247 repo.changelog.parents(n)
2210
2248
2211 timer(d)
2249 timer(d)
2212 fm.end()
2250 fm.end()
2213
2251
2214
2252
2215 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2253 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2216 def perfctxfiles(ui, repo, x, **opts):
2254 def perfctxfiles(ui, repo, x, **opts):
2217 opts = _byteskwargs(opts)
2255 opts = _byteskwargs(opts)
2218 x = int(x)
2256 x = int(x)
2219 timer, fm = gettimer(ui, opts)
2257 timer, fm = gettimer(ui, opts)
2220
2258
2221 def d():
2259 def d():
2222 len(repo[x].files())
2260 len(repo[x].files())
2223
2261
2224 timer(d)
2262 timer(d)
2225 fm.end()
2263 fm.end()
2226
2264
2227
2265
2228 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2266 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2229 def perfrawfiles(ui, repo, x, **opts):
2267 def perfrawfiles(ui, repo, x, **opts):
2230 opts = _byteskwargs(opts)
2268 opts = _byteskwargs(opts)
2231 x = int(x)
2269 x = int(x)
2232 timer, fm = gettimer(ui, opts)
2270 timer, fm = gettimer(ui, opts)
2233 cl = repo.changelog
2271 cl = repo.changelog
2234
2272
2235 def d():
2273 def d():
2236 len(cl.read(x)[3])
2274 len(cl.read(x)[3])
2237
2275
2238 timer(d)
2276 timer(d)
2239 fm.end()
2277 fm.end()
2240
2278
2241
2279
2242 @command(b'perf::lookup|perflookup', formatteropts)
2280 @command(b'perf::lookup|perflookup', formatteropts)
2243 def perflookup(ui, repo, rev, **opts):
2281 def perflookup(ui, repo, rev, **opts):
2244 opts = _byteskwargs(opts)
2282 opts = _byteskwargs(opts)
2245 timer, fm = gettimer(ui, opts)
2283 timer, fm = gettimer(ui, opts)
2246 timer(lambda: len(repo.lookup(rev)))
2284 timer(lambda: len(repo.lookup(rev)))
2247 fm.end()
2285 fm.end()
2248
2286
2249
2287
2250 @command(
2288 @command(
2251 b'perf::linelogedits|perflinelogedits',
2289 b'perf::linelogedits|perflinelogedits',
2252 [
2290 [
2253 (b'n', b'edits', 10000, b'number of edits'),
2291 (b'n', b'edits', 10000, b'number of edits'),
2254 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2292 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2255 ],
2293 ],
2256 norepo=True,
2294 norepo=True,
2257 )
2295 )
2258 def perflinelogedits(ui, **opts):
2296 def perflinelogedits(ui, **opts):
2259 from mercurial import linelog
2297 from mercurial import linelog
2260
2298
2261 opts = _byteskwargs(opts)
2299 opts = _byteskwargs(opts)
2262
2300
2263 edits = opts[b'edits']
2301 edits = opts[b'edits']
2264 maxhunklines = opts[b'max_hunk_lines']
2302 maxhunklines = opts[b'max_hunk_lines']
2265
2303
2266 maxb1 = 100000
2304 maxb1 = 100000
2267 random.seed(0)
2305 random.seed(0)
2268 randint = random.randint
2306 randint = random.randint
2269 currentlines = 0
2307 currentlines = 0
2270 arglist = []
2308 arglist = []
2271 for rev in _xrange(edits):
2309 for rev in _xrange(edits):
2272 a1 = randint(0, currentlines)
2310 a1 = randint(0, currentlines)
2273 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2311 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2274 b1 = randint(0, maxb1)
2312 b1 = randint(0, maxb1)
2275 b2 = randint(b1, b1 + maxhunklines)
2313 b2 = randint(b1, b1 + maxhunklines)
2276 currentlines += (b2 - b1) - (a2 - a1)
2314 currentlines += (b2 - b1) - (a2 - a1)
2277 arglist.append((rev, a1, a2, b1, b2))
2315 arglist.append((rev, a1, a2, b1, b2))
2278
2316
2279 def d():
2317 def d():
2280 ll = linelog.linelog()
2318 ll = linelog.linelog()
2281 for args in arglist:
2319 for args in arglist:
2282 ll.replacelines(*args)
2320 ll.replacelines(*args)
2283
2321
2284 timer, fm = gettimer(ui, opts)
2322 timer, fm = gettimer(ui, opts)
2285 timer(d)
2323 timer(d)
2286 fm.end()
2324 fm.end()
2287
2325
2288
2326
2289 @command(b'perf::revrange|perfrevrange', formatteropts)
2327 @command(b'perf::revrange|perfrevrange', formatteropts)
2290 def perfrevrange(ui, repo, *specs, **opts):
2328 def perfrevrange(ui, repo, *specs, **opts):
2291 opts = _byteskwargs(opts)
2329 opts = _byteskwargs(opts)
2292 timer, fm = gettimer(ui, opts)
2330 timer, fm = gettimer(ui, opts)
2293 revrange = scmutil.revrange
2331 revrange = scmutil.revrange
2294 timer(lambda: len(revrange(repo, specs)))
2332 timer(lambda: len(revrange(repo, specs)))
2295 fm.end()
2333 fm.end()
2296
2334
2297
2335
2298 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2336 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2299 def perfnodelookup(ui, repo, rev, **opts):
2337 def perfnodelookup(ui, repo, rev, **opts):
2300 opts = _byteskwargs(opts)
2338 opts = _byteskwargs(opts)
2301 timer, fm = gettimer(ui, opts)
2339 timer, fm = gettimer(ui, opts)
2302 import mercurial.revlog
2340 import mercurial.revlog
2303
2341
2304 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2342 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2305 n = scmutil.revsingle(repo, rev).node()
2343 n = scmutil.revsingle(repo, rev).node()
2306
2344
2307 try:
2345 try:
2308 cl = revlog(getsvfs(repo), radix=b"00changelog")
2346 cl = revlog(getsvfs(repo), radix=b"00changelog")
2309 except TypeError:
2347 except TypeError:
2310 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2348 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2311
2349
2312 def d():
2350 def d():
2313 cl.rev(n)
2351 cl.rev(n)
2314 clearcaches(cl)
2352 clearcaches(cl)
2315
2353
2316 timer(d)
2354 timer(d)
2317 fm.end()
2355 fm.end()
2318
2356
2319
2357
2320 @command(
2358 @command(
2321 b'perf::log|perflog',
2359 b'perf::log|perflog',
2322 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2360 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2323 )
2361 )
2324 def perflog(ui, repo, rev=None, **opts):
2362 def perflog(ui, repo, rev=None, **opts):
2325 opts = _byteskwargs(opts)
2363 opts = _byteskwargs(opts)
2326 if rev is None:
2364 if rev is None:
2327 rev = []
2365 rev = []
2328 timer, fm = gettimer(ui, opts)
2366 timer, fm = gettimer(ui, opts)
2329 ui.pushbuffer()
2367 ui.pushbuffer()
2330 timer(
2368 timer(
2331 lambda: commands.log(
2369 lambda: commands.log(
2332 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2370 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2333 )
2371 )
2334 )
2372 )
2335 ui.popbuffer()
2373 ui.popbuffer()
2336 fm.end()
2374 fm.end()
2337
2375
2338
2376
2339 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2377 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2340 def perfmoonwalk(ui, repo, **opts):
2378 def perfmoonwalk(ui, repo, **opts):
2341 """benchmark walking the changelog backwards
2379 """benchmark walking the changelog backwards
2342
2380
2343 This also loads the changelog data for each revision in the changelog.
2381 This also loads the changelog data for each revision in the changelog.
2344 """
2382 """
2345 opts = _byteskwargs(opts)
2383 opts = _byteskwargs(opts)
2346 timer, fm = gettimer(ui, opts)
2384 timer, fm = gettimer(ui, opts)
2347
2385
2348 def moonwalk():
2386 def moonwalk():
2349 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2387 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2350 ctx = repo[i]
2388 ctx = repo[i]
2351 ctx.branch() # read changelog data (in addition to the index)
2389 ctx.branch() # read changelog data (in addition to the index)
2352
2390
2353 timer(moonwalk)
2391 timer(moonwalk)
2354 fm.end()
2392 fm.end()
2355
2393
2356
2394
2357 @command(
2395 @command(
2358 b'perf::templating|perftemplating',
2396 b'perf::templating|perftemplating',
2359 [
2397 [
2360 (b'r', b'rev', [], b'revisions to run the template on'),
2398 (b'r', b'rev', [], b'revisions to run the template on'),
2361 ]
2399 ]
2362 + formatteropts,
2400 + formatteropts,
2363 )
2401 )
2364 def perftemplating(ui, repo, testedtemplate=None, **opts):
2402 def perftemplating(ui, repo, testedtemplate=None, **opts):
2365 """test the rendering time of a given template"""
2403 """test the rendering time of a given template"""
2366 if makelogtemplater is None:
2404 if makelogtemplater is None:
2367 raise error.Abort(
2405 raise error.Abort(
2368 b"perftemplating not available with this Mercurial",
2406 b"perftemplating not available with this Mercurial",
2369 hint=b"use 4.3 or later",
2407 hint=b"use 4.3 or later",
2370 )
2408 )
2371
2409
2372 opts = _byteskwargs(opts)
2410 opts = _byteskwargs(opts)
2373
2411
2374 nullui = ui.copy()
2412 nullui = ui.copy()
2375 nullui.fout = open(os.devnull, 'wb')
2413 nullui.fout = open(os.devnull, 'wb')
2376 nullui.disablepager()
2414 nullui.disablepager()
2377 revs = opts.get(b'rev')
2415 revs = opts.get(b'rev')
2378 if not revs:
2416 if not revs:
2379 revs = [b'all()']
2417 revs = [b'all()']
2380 revs = list(scmutil.revrange(repo, revs))
2418 revs = list(scmutil.revrange(repo, revs))
2381
2419
2382 defaulttemplate = (
2420 defaulttemplate = (
2383 b'{date|shortdate} [{rev}:{node|short}]'
2421 b'{date|shortdate} [{rev}:{node|short}]'
2384 b' {author|person}: {desc|firstline}\n'
2422 b' {author|person}: {desc|firstline}\n'
2385 )
2423 )
2386 if testedtemplate is None:
2424 if testedtemplate is None:
2387 testedtemplate = defaulttemplate
2425 testedtemplate = defaulttemplate
2388 displayer = makelogtemplater(nullui, repo, testedtemplate)
2426 displayer = makelogtemplater(nullui, repo, testedtemplate)
2389
2427
2390 def format():
2428 def format():
2391 for r in revs:
2429 for r in revs:
2392 ctx = repo[r]
2430 ctx = repo[r]
2393 displayer.show(ctx)
2431 displayer.show(ctx)
2394 displayer.flush(ctx)
2432 displayer.flush(ctx)
2395
2433
2396 timer, fm = gettimer(ui, opts)
2434 timer, fm = gettimer(ui, opts)
2397 timer(format)
2435 timer(format)
2398 fm.end()
2436 fm.end()
2399
2437
2400
2438
2401 def _displaystats(ui, opts, entries, data):
2439 def _displaystats(ui, opts, entries, data):
2402 # use a second formatter because the data are quite different, not sure
2440 # use a second formatter because the data are quite different, not sure
2403 # how it flies with the templater.
2441 # how it flies with the templater.
2404 fm = ui.formatter(b'perf-stats', opts)
2442 fm = ui.formatter(b'perf-stats', opts)
2405 for key, title in entries:
2443 for key, title in entries:
2406 values = data[key]
2444 values = data[key]
2407 nbvalues = len(data)
2445 nbvalues = len(data)
2408 values.sort()
2446 values.sort()
2409 stats = {
2447 stats = {
2410 'key': key,
2448 'key': key,
2411 'title': title,
2449 'title': title,
2412 'nbitems': len(values),
2450 'nbitems': len(values),
2413 'min': values[0][0],
2451 'min': values[0][0],
2414 '10%': values[(nbvalues * 10) // 100][0],
2452 '10%': values[(nbvalues * 10) // 100][0],
2415 '25%': values[(nbvalues * 25) // 100][0],
2453 '25%': values[(nbvalues * 25) // 100][0],
2416 '50%': values[(nbvalues * 50) // 100][0],
2454 '50%': values[(nbvalues * 50) // 100][0],
2417 '75%': values[(nbvalues * 75) // 100][0],
2455 '75%': values[(nbvalues * 75) // 100][0],
2418 '80%': values[(nbvalues * 80) // 100][0],
2456 '80%': values[(nbvalues * 80) // 100][0],
2419 '85%': values[(nbvalues * 85) // 100][0],
2457 '85%': values[(nbvalues * 85) // 100][0],
2420 '90%': values[(nbvalues * 90) // 100][0],
2458 '90%': values[(nbvalues * 90) // 100][0],
2421 '95%': values[(nbvalues * 95) // 100][0],
2459 '95%': values[(nbvalues * 95) // 100][0],
2422 '99%': values[(nbvalues * 99) // 100][0],
2460 '99%': values[(nbvalues * 99) // 100][0],
2423 'max': values[-1][0],
2461 'max': values[-1][0],
2424 }
2462 }
2425 fm.startitem()
2463 fm.startitem()
2426 fm.data(**stats)
2464 fm.data(**stats)
2427 # make node pretty for the human output
2465 # make node pretty for the human output
2428 fm.plain('### %s (%d items)\n' % (title, len(values)))
2466 fm.plain('### %s (%d items)\n' % (title, len(values)))
2429 lines = [
2467 lines = [
2430 'min',
2468 'min',
2431 '10%',
2469 '10%',
2432 '25%',
2470 '25%',
2433 '50%',
2471 '50%',
2434 '75%',
2472 '75%',
2435 '80%',
2473 '80%',
2436 '85%',
2474 '85%',
2437 '90%',
2475 '90%',
2438 '95%',
2476 '95%',
2439 '99%',
2477 '99%',
2440 'max',
2478 'max',
2441 ]
2479 ]
2442 for l in lines:
2480 for l in lines:
2443 fm.plain('%s: %s\n' % (l, stats[l]))
2481 fm.plain('%s: %s\n' % (l, stats[l]))
2444 fm.end()
2482 fm.end()
2445
2483
2446
2484
2447 @command(
2485 @command(
2448 b'perf::helper-mergecopies|perfhelper-mergecopies',
2486 b'perf::helper-mergecopies|perfhelper-mergecopies',
2449 formatteropts
2487 formatteropts
2450 + [
2488 + [
2451 (b'r', b'revs', [], b'restrict search to these revisions'),
2489 (b'r', b'revs', [], b'restrict search to these revisions'),
2452 (b'', b'timing', False, b'provides extra data (costly)'),
2490 (b'', b'timing', False, b'provides extra data (costly)'),
2453 (b'', b'stats', False, b'provides statistic about the measured data'),
2491 (b'', b'stats', False, b'provides statistic about the measured data'),
2454 ],
2492 ],
2455 )
2493 )
2456 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2494 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2457 """find statistics about potential parameters for `perfmergecopies`
2495 """find statistics about potential parameters for `perfmergecopies`
2458
2496
2459 This command find (base, p1, p2) triplet relevant for copytracing
2497 This command find (base, p1, p2) triplet relevant for copytracing
2460 benchmarking in the context of a merge. It reports values for some of the
2498 benchmarking in the context of a merge. It reports values for some of the
2461 parameters that impact merge copy tracing time during merge.
2499 parameters that impact merge copy tracing time during merge.
2462
2500
2463 If `--timing` is set, rename detection is run and the associated timing
2501 If `--timing` is set, rename detection is run and the associated timing
2464 will be reported. The extra details come at the cost of slower command
2502 will be reported. The extra details come at the cost of slower command
2465 execution.
2503 execution.
2466
2504
2467 Since rename detection is only run once, other factors might easily
2505 Since rename detection is only run once, other factors might easily
2468 affect the precision of the timing. However it should give a good
2506 affect the precision of the timing. However it should give a good
2469 approximation of which revision triplets are very costly.
2507 approximation of which revision triplets are very costly.
2470 """
2508 """
2471 opts = _byteskwargs(opts)
2509 opts = _byteskwargs(opts)
2472 fm = ui.formatter(b'perf', opts)
2510 fm = ui.formatter(b'perf', opts)
2473 dotiming = opts[b'timing']
2511 dotiming = opts[b'timing']
2474 dostats = opts[b'stats']
2512 dostats = opts[b'stats']
2475
2513
2476 output_template = [
2514 output_template = [
2477 ("base", "%(base)12s"),
2515 ("base", "%(base)12s"),
2478 ("p1", "%(p1.node)12s"),
2516 ("p1", "%(p1.node)12s"),
2479 ("p2", "%(p2.node)12s"),
2517 ("p2", "%(p2.node)12s"),
2480 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2518 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2481 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2519 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2482 ("p1.renames", "%(p1.renamedfiles)12d"),
2520 ("p1.renames", "%(p1.renamedfiles)12d"),
2483 ("p1.time", "%(p1.time)12.3f"),
2521 ("p1.time", "%(p1.time)12.3f"),
2484 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2522 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2485 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2523 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2486 ("p2.renames", "%(p2.renamedfiles)12d"),
2524 ("p2.renames", "%(p2.renamedfiles)12d"),
2487 ("p2.time", "%(p2.time)12.3f"),
2525 ("p2.time", "%(p2.time)12.3f"),
2488 ("renames", "%(nbrenamedfiles)12d"),
2526 ("renames", "%(nbrenamedfiles)12d"),
2489 ("total.time", "%(time)12.3f"),
2527 ("total.time", "%(time)12.3f"),
2490 ]
2528 ]
2491 if not dotiming:
2529 if not dotiming:
2492 output_template = [
2530 output_template = [
2493 i
2531 i
2494 for i in output_template
2532 for i in output_template
2495 if not ('time' in i[0] or 'renames' in i[0])
2533 if not ('time' in i[0] or 'renames' in i[0])
2496 ]
2534 ]
2497 header_names = [h for (h, v) in output_template]
2535 header_names = [h for (h, v) in output_template]
2498 output = ' '.join([v for (h, v) in output_template]) + '\n'
2536 output = ' '.join([v for (h, v) in output_template]) + '\n'
2499 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2537 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2500 fm.plain(header % tuple(header_names))
2538 fm.plain(header % tuple(header_names))
2501
2539
2502 if not revs:
2540 if not revs:
2503 revs = ['all()']
2541 revs = ['all()']
2504 revs = scmutil.revrange(repo, revs)
2542 revs = scmutil.revrange(repo, revs)
2505
2543
2506 if dostats:
2544 if dostats:
2507 alldata = {
2545 alldata = {
2508 'nbrevs': [],
2546 'nbrevs': [],
2509 'nbmissingfiles': [],
2547 'nbmissingfiles': [],
2510 }
2548 }
2511 if dotiming:
2549 if dotiming:
2512 alldata['parentnbrenames'] = []
2550 alldata['parentnbrenames'] = []
2513 alldata['totalnbrenames'] = []
2551 alldata['totalnbrenames'] = []
2514 alldata['parenttime'] = []
2552 alldata['parenttime'] = []
2515 alldata['totaltime'] = []
2553 alldata['totaltime'] = []
2516
2554
2517 roi = repo.revs('merge() and %ld', revs)
2555 roi = repo.revs('merge() and %ld', revs)
2518 for r in roi:
2556 for r in roi:
2519 ctx = repo[r]
2557 ctx = repo[r]
2520 p1 = ctx.p1()
2558 p1 = ctx.p1()
2521 p2 = ctx.p2()
2559 p2 = ctx.p2()
2522 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2560 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2523 for b in bases:
2561 for b in bases:
2524 b = repo[b]
2562 b = repo[b]
2525 p1missing = copies._computeforwardmissing(b, p1)
2563 p1missing = copies._computeforwardmissing(b, p1)
2526 p2missing = copies._computeforwardmissing(b, p2)
2564 p2missing = copies._computeforwardmissing(b, p2)
2527 data = {
2565 data = {
2528 b'base': b.hex(),
2566 b'base': b.hex(),
2529 b'p1.node': p1.hex(),
2567 b'p1.node': p1.hex(),
2530 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2568 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2531 b'p1.nbmissingfiles': len(p1missing),
2569 b'p1.nbmissingfiles': len(p1missing),
2532 b'p2.node': p2.hex(),
2570 b'p2.node': p2.hex(),
2533 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2571 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2534 b'p2.nbmissingfiles': len(p2missing),
2572 b'p2.nbmissingfiles': len(p2missing),
2535 }
2573 }
2536 if dostats:
2574 if dostats:
2537 if p1missing:
2575 if p1missing:
2538 alldata['nbrevs'].append(
2576 alldata['nbrevs'].append(
2539 (data['p1.nbrevs'], b.hex(), p1.hex())
2577 (data['p1.nbrevs'], b.hex(), p1.hex())
2540 )
2578 )
2541 alldata['nbmissingfiles'].append(
2579 alldata['nbmissingfiles'].append(
2542 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2580 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2543 )
2581 )
2544 if p2missing:
2582 if p2missing:
2545 alldata['nbrevs'].append(
2583 alldata['nbrevs'].append(
2546 (data['p2.nbrevs'], b.hex(), p2.hex())
2584 (data['p2.nbrevs'], b.hex(), p2.hex())
2547 )
2585 )
2548 alldata['nbmissingfiles'].append(
2586 alldata['nbmissingfiles'].append(
2549 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2587 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2550 )
2588 )
2551 if dotiming:
2589 if dotiming:
2552 begin = util.timer()
2590 begin = util.timer()
2553 mergedata = copies.mergecopies(repo, p1, p2, b)
2591 mergedata = copies.mergecopies(repo, p1, p2, b)
2554 end = util.timer()
2592 end = util.timer()
2555 # not very stable timing since we did only one run
2593 # not very stable timing since we did only one run
2556 data['time'] = end - begin
2594 data['time'] = end - begin
2557 # mergedata contains five dicts: "copy", "movewithdir",
2595 # mergedata contains five dicts: "copy", "movewithdir",
2558 # "diverge", "renamedelete" and "dirmove".
2596 # "diverge", "renamedelete" and "dirmove".
2559 # The first 4 are about renamed file so lets count that.
2597 # The first 4 are about renamed file so lets count that.
2560 renames = len(mergedata[0])
2598 renames = len(mergedata[0])
2561 renames += len(mergedata[1])
2599 renames += len(mergedata[1])
2562 renames += len(mergedata[2])
2600 renames += len(mergedata[2])
2563 renames += len(mergedata[3])
2601 renames += len(mergedata[3])
2564 data['nbrenamedfiles'] = renames
2602 data['nbrenamedfiles'] = renames
2565 begin = util.timer()
2603 begin = util.timer()
2566 p1renames = copies.pathcopies(b, p1)
2604 p1renames = copies.pathcopies(b, p1)
2567 end = util.timer()
2605 end = util.timer()
2568 data['p1.time'] = end - begin
2606 data['p1.time'] = end - begin
2569 begin = util.timer()
2607 begin = util.timer()
2570 p2renames = copies.pathcopies(b, p2)
2608 p2renames = copies.pathcopies(b, p2)
2571 end = util.timer()
2609 end = util.timer()
2572 data['p2.time'] = end - begin
2610 data['p2.time'] = end - begin
2573 data['p1.renamedfiles'] = len(p1renames)
2611 data['p1.renamedfiles'] = len(p1renames)
2574 data['p2.renamedfiles'] = len(p2renames)
2612 data['p2.renamedfiles'] = len(p2renames)
2575
2613
2576 if dostats:
2614 if dostats:
2577 if p1missing:
2615 if p1missing:
2578 alldata['parentnbrenames'].append(
2616 alldata['parentnbrenames'].append(
2579 (data['p1.renamedfiles'], b.hex(), p1.hex())
2617 (data['p1.renamedfiles'], b.hex(), p1.hex())
2580 )
2618 )
2581 alldata['parenttime'].append(
2619 alldata['parenttime'].append(
2582 (data['p1.time'], b.hex(), p1.hex())
2620 (data['p1.time'], b.hex(), p1.hex())
2583 )
2621 )
2584 if p2missing:
2622 if p2missing:
2585 alldata['parentnbrenames'].append(
2623 alldata['parentnbrenames'].append(
2586 (data['p2.renamedfiles'], b.hex(), p2.hex())
2624 (data['p2.renamedfiles'], b.hex(), p2.hex())
2587 )
2625 )
2588 alldata['parenttime'].append(
2626 alldata['parenttime'].append(
2589 (data['p2.time'], b.hex(), p2.hex())
2627 (data['p2.time'], b.hex(), p2.hex())
2590 )
2628 )
2591 if p1missing or p2missing:
2629 if p1missing or p2missing:
2592 alldata['totalnbrenames'].append(
2630 alldata['totalnbrenames'].append(
2593 (
2631 (
2594 data['nbrenamedfiles'],
2632 data['nbrenamedfiles'],
2595 b.hex(),
2633 b.hex(),
2596 p1.hex(),
2634 p1.hex(),
2597 p2.hex(),
2635 p2.hex(),
2598 )
2636 )
2599 )
2637 )
2600 alldata['totaltime'].append(
2638 alldata['totaltime'].append(
2601 (data['time'], b.hex(), p1.hex(), p2.hex())
2639 (data['time'], b.hex(), p1.hex(), p2.hex())
2602 )
2640 )
2603 fm.startitem()
2641 fm.startitem()
2604 fm.data(**data)
2642 fm.data(**data)
2605 # make node pretty for the human output
2643 # make node pretty for the human output
2606 out = data.copy()
2644 out = data.copy()
2607 out['base'] = fm.hexfunc(b.node())
2645 out['base'] = fm.hexfunc(b.node())
2608 out['p1.node'] = fm.hexfunc(p1.node())
2646 out['p1.node'] = fm.hexfunc(p1.node())
2609 out['p2.node'] = fm.hexfunc(p2.node())
2647 out['p2.node'] = fm.hexfunc(p2.node())
2610 fm.plain(output % out)
2648 fm.plain(output % out)
2611
2649
2612 fm.end()
2650 fm.end()
2613 if dostats:
2651 if dostats:
2614 # use a second formatter because the data are quite different, not sure
2652 # use a second formatter because the data are quite different, not sure
2615 # how it flies with the templater.
2653 # how it flies with the templater.
2616 entries = [
2654 entries = [
2617 ('nbrevs', 'number of revision covered'),
2655 ('nbrevs', 'number of revision covered'),
2618 ('nbmissingfiles', 'number of missing files at head'),
2656 ('nbmissingfiles', 'number of missing files at head'),
2619 ]
2657 ]
2620 if dotiming:
2658 if dotiming:
2621 entries.append(
2659 entries.append(
2622 ('parentnbrenames', 'rename from one parent to base')
2660 ('parentnbrenames', 'rename from one parent to base')
2623 )
2661 )
2624 entries.append(('totalnbrenames', 'total number of renames'))
2662 entries.append(('totalnbrenames', 'total number of renames'))
2625 entries.append(('parenttime', 'time for one parent'))
2663 entries.append(('parenttime', 'time for one parent'))
2626 entries.append(('totaltime', 'time for both parents'))
2664 entries.append(('totaltime', 'time for both parents'))
2627 _displaystats(ui, opts, entries, alldata)
2665 _displaystats(ui, opts, entries, alldata)
2628
2666
2629
2667
2630 @command(
2668 @command(
2631 b'perf::helper-pathcopies|perfhelper-pathcopies',
2669 b'perf::helper-pathcopies|perfhelper-pathcopies',
2632 formatteropts
2670 formatteropts
2633 + [
2671 + [
2634 (b'r', b'revs', [], b'restrict search to these revisions'),
2672 (b'r', b'revs', [], b'restrict search to these revisions'),
2635 (b'', b'timing', False, b'provides extra data (costly)'),
2673 (b'', b'timing', False, b'provides extra data (costly)'),
2636 (b'', b'stats', False, b'provides statistic about the measured data'),
2674 (b'', b'stats', False, b'provides statistic about the measured data'),
2637 ],
2675 ],
2638 )
2676 )
2639 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2677 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2640 """find statistic about potential parameters for the `perftracecopies`
2678 """find statistic about potential parameters for the `perftracecopies`
2641
2679
2642 This command find source-destination pair relevant for copytracing testing.
2680 This command find source-destination pair relevant for copytracing testing.
2643 It report value for some of the parameters that impact copy tracing time.
2681 It report value for some of the parameters that impact copy tracing time.
2644
2682
2645 If `--timing` is set, rename detection is run and the associated timing
2683 If `--timing` is set, rename detection is run and the associated timing
2646 will be reported. The extra details comes at the cost of a slower command
2684 will be reported. The extra details comes at the cost of a slower command
2647 execution.
2685 execution.
2648
2686
2649 Since the rename detection is only run once, other factors might easily
2687 Since the rename detection is only run once, other factors might easily
2650 affect the precision of the timing. However it should give a good
2688 affect the precision of the timing. However it should give a good
2651 approximation of which revision pairs are very costly.
2689 approximation of which revision pairs are very costly.
2652 """
2690 """
2653 opts = _byteskwargs(opts)
2691 opts = _byteskwargs(opts)
2654 fm = ui.formatter(b'perf', opts)
2692 fm = ui.formatter(b'perf', opts)
2655 dotiming = opts[b'timing']
2693 dotiming = opts[b'timing']
2656 dostats = opts[b'stats']
2694 dostats = opts[b'stats']
2657
2695
2658 if dotiming:
2696 if dotiming:
2659 header = '%12s %12s %12s %12s %12s %12s\n'
2697 header = '%12s %12s %12s %12s %12s %12s\n'
2660 output = (
2698 output = (
2661 "%(source)12s %(destination)12s "
2699 "%(source)12s %(destination)12s "
2662 "%(nbrevs)12d %(nbmissingfiles)12d "
2700 "%(nbrevs)12d %(nbmissingfiles)12d "
2663 "%(nbrenamedfiles)12d %(time)18.5f\n"
2701 "%(nbrenamedfiles)12d %(time)18.5f\n"
2664 )
2702 )
2665 header_names = (
2703 header_names = (
2666 "source",
2704 "source",
2667 "destination",
2705 "destination",
2668 "nb-revs",
2706 "nb-revs",
2669 "nb-files",
2707 "nb-files",
2670 "nb-renames",
2708 "nb-renames",
2671 "time",
2709 "time",
2672 )
2710 )
2673 fm.plain(header % header_names)
2711 fm.plain(header % header_names)
2674 else:
2712 else:
2675 header = '%12s %12s %12s %12s\n'
2713 header = '%12s %12s %12s %12s\n'
2676 output = (
2714 output = (
2677 "%(source)12s %(destination)12s "
2715 "%(source)12s %(destination)12s "
2678 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2716 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2679 )
2717 )
2680 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2718 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2681
2719
2682 if not revs:
2720 if not revs:
2683 revs = ['all()']
2721 revs = ['all()']
2684 revs = scmutil.revrange(repo, revs)
2722 revs = scmutil.revrange(repo, revs)
2685
2723
2686 if dostats:
2724 if dostats:
2687 alldata = {
2725 alldata = {
2688 'nbrevs': [],
2726 'nbrevs': [],
2689 'nbmissingfiles': [],
2727 'nbmissingfiles': [],
2690 }
2728 }
2691 if dotiming:
2729 if dotiming:
2692 alldata['nbrenames'] = []
2730 alldata['nbrenames'] = []
2693 alldata['time'] = []
2731 alldata['time'] = []
2694
2732
2695 roi = repo.revs('merge() and %ld', revs)
2733 roi = repo.revs('merge() and %ld', revs)
2696 for r in roi:
2734 for r in roi:
2697 ctx = repo[r]
2735 ctx = repo[r]
2698 p1 = ctx.p1().rev()
2736 p1 = ctx.p1().rev()
2699 p2 = ctx.p2().rev()
2737 p2 = ctx.p2().rev()
2700 bases = repo.changelog._commonancestorsheads(p1, p2)
2738 bases = repo.changelog._commonancestorsheads(p1, p2)
2701 for p in (p1, p2):
2739 for p in (p1, p2):
2702 for b in bases:
2740 for b in bases:
2703 base = repo[b]
2741 base = repo[b]
2704 parent = repo[p]
2742 parent = repo[p]
2705 missing = copies._computeforwardmissing(base, parent)
2743 missing = copies._computeforwardmissing(base, parent)
2706 if not missing:
2744 if not missing:
2707 continue
2745 continue
2708 data = {
2746 data = {
2709 b'source': base.hex(),
2747 b'source': base.hex(),
2710 b'destination': parent.hex(),
2748 b'destination': parent.hex(),
2711 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2749 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2712 b'nbmissingfiles': len(missing),
2750 b'nbmissingfiles': len(missing),
2713 }
2751 }
2714 if dostats:
2752 if dostats:
2715 alldata['nbrevs'].append(
2753 alldata['nbrevs'].append(
2716 (
2754 (
2717 data['nbrevs'],
2755 data['nbrevs'],
2718 base.hex(),
2756 base.hex(),
2719 parent.hex(),
2757 parent.hex(),
2720 )
2758 )
2721 )
2759 )
2722 alldata['nbmissingfiles'].append(
2760 alldata['nbmissingfiles'].append(
2723 (
2761 (
2724 data['nbmissingfiles'],
2762 data['nbmissingfiles'],
2725 base.hex(),
2763 base.hex(),
2726 parent.hex(),
2764 parent.hex(),
2727 )
2765 )
2728 )
2766 )
2729 if dotiming:
2767 if dotiming:
2730 begin = util.timer()
2768 begin = util.timer()
2731 renames = copies.pathcopies(base, parent)
2769 renames = copies.pathcopies(base, parent)
2732 end = util.timer()
2770 end = util.timer()
2733 # not very stable timing since we did only one run
2771 # not very stable timing since we did only one run
2734 data['time'] = end - begin
2772 data['time'] = end - begin
2735 data['nbrenamedfiles'] = len(renames)
2773 data['nbrenamedfiles'] = len(renames)
2736 if dostats:
2774 if dostats:
2737 alldata['time'].append(
2775 alldata['time'].append(
2738 (
2776 (
2739 data['time'],
2777 data['time'],
2740 base.hex(),
2778 base.hex(),
2741 parent.hex(),
2779 parent.hex(),
2742 )
2780 )
2743 )
2781 )
2744 alldata['nbrenames'].append(
2782 alldata['nbrenames'].append(
2745 (
2783 (
2746 data['nbrenamedfiles'],
2784 data['nbrenamedfiles'],
2747 base.hex(),
2785 base.hex(),
2748 parent.hex(),
2786 parent.hex(),
2749 )
2787 )
2750 )
2788 )
2751 fm.startitem()
2789 fm.startitem()
2752 fm.data(**data)
2790 fm.data(**data)
2753 out = data.copy()
2791 out = data.copy()
2754 out['source'] = fm.hexfunc(base.node())
2792 out['source'] = fm.hexfunc(base.node())
2755 out['destination'] = fm.hexfunc(parent.node())
2793 out['destination'] = fm.hexfunc(parent.node())
2756 fm.plain(output % out)
2794 fm.plain(output % out)
2757
2795
2758 fm.end()
2796 fm.end()
2759 if dostats:
2797 if dostats:
2760 entries = [
2798 entries = [
2761 ('nbrevs', 'number of revision covered'),
2799 ('nbrevs', 'number of revision covered'),
2762 ('nbmissingfiles', 'number of missing files at head'),
2800 ('nbmissingfiles', 'number of missing files at head'),
2763 ]
2801 ]
2764 if dotiming:
2802 if dotiming:
2765 entries.append(('nbrenames', 'renamed files'))
2803 entries.append(('nbrenames', 'renamed files'))
2766 entries.append(('time', 'time'))
2804 entries.append(('time', 'time'))
2767 _displaystats(ui, opts, entries, alldata)
2805 _displaystats(ui, opts, entries, alldata)
2768
2806
2769
2807
2770 @command(b'perf::cca|perfcca', formatteropts)
2808 @command(b'perf::cca|perfcca', formatteropts)
2771 def perfcca(ui, repo, **opts):
2809 def perfcca(ui, repo, **opts):
2772 opts = _byteskwargs(opts)
2810 opts = _byteskwargs(opts)
2773 timer, fm = gettimer(ui, opts)
2811 timer, fm = gettimer(ui, opts)
2774 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2812 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2775 fm.end()
2813 fm.end()
2776
2814
2777
2815
2778 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2816 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2779 def perffncacheload(ui, repo, **opts):
2817 def perffncacheload(ui, repo, **opts):
2780 opts = _byteskwargs(opts)
2818 opts = _byteskwargs(opts)
2781 timer, fm = gettimer(ui, opts)
2819 timer, fm = gettimer(ui, opts)
2782 s = repo.store
2820 s = repo.store
2783
2821
2784 def d():
2822 def d():
2785 s.fncache._load()
2823 s.fncache._load()
2786
2824
2787 timer(d)
2825 timer(d)
2788 fm.end()
2826 fm.end()
2789
2827
2790
2828
2791 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2829 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2792 def perffncachewrite(ui, repo, **opts):
2830 def perffncachewrite(ui, repo, **opts):
2793 opts = _byteskwargs(opts)
2831 opts = _byteskwargs(opts)
2794 timer, fm = gettimer(ui, opts)
2832 timer, fm = gettimer(ui, opts)
2795 s = repo.store
2833 s = repo.store
2796 lock = repo.lock()
2834 lock = repo.lock()
2797 s.fncache._load()
2835 s.fncache._load()
2798 tr = repo.transaction(b'perffncachewrite')
2836 tr = repo.transaction(b'perffncachewrite')
2799 tr.addbackup(b'fncache')
2837 tr.addbackup(b'fncache')
2800
2838
2801 def d():
2839 def d():
2802 s.fncache._dirty = True
2840 s.fncache._dirty = True
2803 s.fncache.write(tr)
2841 s.fncache.write(tr)
2804
2842
2805 timer(d)
2843 timer(d)
2806 tr.close()
2844 tr.close()
2807 lock.release()
2845 lock.release()
2808 fm.end()
2846 fm.end()
2809
2847
2810
2848
2811 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2849 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2812 def perffncacheencode(ui, repo, **opts):
2850 def perffncacheencode(ui, repo, **opts):
2813 opts = _byteskwargs(opts)
2851 opts = _byteskwargs(opts)
2814 timer, fm = gettimer(ui, opts)
2852 timer, fm = gettimer(ui, opts)
2815 s = repo.store
2853 s = repo.store
2816 s.fncache._load()
2854 s.fncache._load()
2817
2855
2818 def d():
2856 def d():
2819 for p in s.fncache.entries:
2857 for p in s.fncache.entries:
2820 s.encode(p)
2858 s.encode(p)
2821
2859
2822 timer(d)
2860 timer(d)
2823 fm.end()
2861 fm.end()
2824
2862
2825
2863
2826 def _bdiffworker(q, blocks, xdiff, ready, done):
2864 def _bdiffworker(q, blocks, xdiff, ready, done):
2827 while not done.is_set():
2865 while not done.is_set():
2828 pair = q.get()
2866 pair = q.get()
2829 while pair is not None:
2867 while pair is not None:
2830 if xdiff:
2868 if xdiff:
2831 mdiff.bdiff.xdiffblocks(*pair)
2869 mdiff.bdiff.xdiffblocks(*pair)
2832 elif blocks:
2870 elif blocks:
2833 mdiff.bdiff.blocks(*pair)
2871 mdiff.bdiff.blocks(*pair)
2834 else:
2872 else:
2835 mdiff.textdiff(*pair)
2873 mdiff.textdiff(*pair)
2836 q.task_done()
2874 q.task_done()
2837 pair = q.get()
2875 pair = q.get()
2838 q.task_done() # for the None one
2876 q.task_done() # for the None one
2839 with ready:
2877 with ready:
2840 ready.wait()
2878 ready.wait()
2841
2879
2842
2880
2843 def _manifestrevision(repo, mnode):
2881 def _manifestrevision(repo, mnode):
2844 ml = repo.manifestlog
2882 ml = repo.manifestlog
2845
2883
2846 if util.safehasattr(ml, b'getstorage'):
2884 if util.safehasattr(ml, b'getstorage'):
2847 store = ml.getstorage(b'')
2885 store = ml.getstorage(b'')
2848 else:
2886 else:
2849 store = ml._revlog
2887 store = ml._revlog
2850
2888
2851 return store.revision(mnode)
2889 return store.revision(mnode)
2852
2890
2853
2891
2854 @command(
2892 @command(
2855 b'perf::bdiff|perfbdiff',
2893 b'perf::bdiff|perfbdiff',
2856 revlogopts
2894 revlogopts
2857 + formatteropts
2895 + formatteropts
2858 + [
2896 + [
2859 (
2897 (
2860 b'',
2898 b'',
2861 b'count',
2899 b'count',
2862 1,
2900 1,
2863 b'number of revisions to test (when using --startrev)',
2901 b'number of revisions to test (when using --startrev)',
2864 ),
2902 ),
2865 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2903 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2866 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2904 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2867 (b'', b'blocks', False, b'test computing diffs into blocks'),
2905 (b'', b'blocks', False, b'test computing diffs into blocks'),
2868 (b'', b'xdiff', False, b'use xdiff algorithm'),
2906 (b'', b'xdiff', False, b'use xdiff algorithm'),
2869 ],
2907 ],
2870 b'-c|-m|FILE REV',
2908 b'-c|-m|FILE REV',
2871 )
2909 )
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    rl = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = rl.rev(rl.lookup(rev))
    lastrev = min(startrev + count, len(rl) - 1)
    for rev in range(startrev, lastrev):
        if opts[b'alldata']:
            # Pair each parent manifest text with the changeset's manifest.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                textpairs.append(
                    (_manifestrevision(repo, pctx.manifestnode()), mtext)
                )

            # Walk the manifest delta to collect old/new filelog texts for
            # every file touched by this changeset.
            curman = ctx.manifest()
            parman = ctx.p1().manifest()
            for fname, change in parman.diff(curman).items():
                flog = repo.file(fname)
                oldtext = flog.revision(change[0][0] or -1)
                newtext = flog.revision(change[1][0] or -1)
                textpairs.append((oldtext, newtext))
        else:
            base = rl.deltaparent(rev)
            textpairs.append((rl.revision(base), rl.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # Single-threaded: diff every pair inline.
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # Threaded mode: workers pull pairs from a queue; a None item tells a
        # worker there is no more work for this round.
        q = queue()
        for _ in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for _ in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for _ in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # Signal the workers to exit and wake any that are waiting.
        done.set()
        for _ in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2968
3006
2969
3007
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # bundle holds [unbundler, transaction] for the current run so
            # that both setup() and apply() can share/replace them.
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from a previous run
                        # before rewinding the bundle and starting a new one
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # fix: this previously read `repo.ui.quiet == orig_quiet`,
                # a no-op comparison that never restored the quiet level.
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3086 mercurial.revlog._maxinline = old_max_inline
3049
3087
3050
3088
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    rl = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = rl.rev(rl.lookup(rev))
    lastrev = min(startrev + count, len(rl) - 1)
    for rev in range(startrev, lastrev):
        if opts[b'alldata']:
            # Pair each parent manifest text with the changeset's manifest.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                textpairs.append(
                    (_manifestrevision(repo, pctx.manifestnode()), mtext)
                )

            # Walk the manifest delta to collect old/new filelog texts for
            # every file touched by this changeset.
            curman = ctx.manifest()
            parman = ctx.p1().manifest()
            for fname, change in parman.diff(curman).items():
                flog = repo.file(fname)
                textpairs.append(
                    (
                        flog.revision(change[0][0] or -1),
                        flog.revision(change[1][0] or -1),
                    )
                )
        else:
            base = rl.deltaparent(rev)
            textpairs.append((rl.revision(base), rl.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3129
3167
3130
3168
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter diff flag -> the diffopts keyword it enables
    flag_to_opt = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark the plain diff plus each whitespace-related option combo
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffopts = {flag_to_opt[c]: b'1' for c in flags}

        def d():
            # swallow the diff output so only the work itself is timed
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()

        flagbytes = flags.encode('ascii')
        if flagbytes:
            title = b'diffopts: %s' % (b'-' + flagbytes)
        else:
            title = b'diffopts: %s' % b'none'
        timer(d, title=title)
    fm.end()
3154
3192
3155
3193
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # first 4 bytes: flags (high 16 bits) + revlog version (low 16 bits)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 5.8 exposed the parser through revlogio
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # nodes at representative positions within the revlog
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # Benchmark instantiating a revlog object.
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        # Benchmark raw index file reading.
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # Benchmark parsing the raw index data.
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            # fix: reuse the index parsed above instead of re-parsing the
            # raw data a second time just to fetch its nodemap.
            nodemap = getattr(index, 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            # fix: reuse the index parsed above instead of re-parsing the
            # raw data a second time just to fetch its nodemap.
            nodemap = getattr(index, 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3301
3339
3302
3340
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # negative start counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, stop = rllen - 1, startrev - 1
            step = -step
        else:
            first, stop = startrev, rllen

        for x in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3351
3389
3352
3390
3353 @command(
3391 @command(
3354 b'perf::revlogwrite|perfrevlogwrite',
3392 b'perf::revlogwrite|perfrevlogwrite',
3355 revlogopts
3393 revlogopts
3356 + formatteropts
3394 + formatteropts
3357 + [
3395 + [
3358 (b's', b'startrev', 1000, b'revision to start writing at'),
3396 (b's', b'startrev', 1000, b'revision to start writing at'),
3359 (b'', b'stoprev', -1, b'last revision to write'),
3397 (b'', b'stoprev', -1, b'last revision to write'),
3360 (b'', b'count', 3, b'number of passes to perform'),
3398 (b'', b'count', 3, b'number of passes to perform'),
3361 (b'', b'details', False, b'print timing for every revisions tested'),
3399 (b'', b'details', False, b'print timing for every revisions tested'),
3362 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3400 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3363 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3401 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3364 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3402 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3365 ],
3403 ],
3366 b'-c|-m|FILE',
3404 b'-c|-m|FILE',
3367 )
3405 )
3368 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3406 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3369 """Benchmark writing a series of revisions to a revlog.
3407 """Benchmark writing a series of revisions to a revlog.
3370
3408
3371 Possible source values are:
3409 Possible source values are:
3372 * `full`: add from a full text (default).
3410 * `full`: add from a full text (default).
3373 * `parent-1`: add from a delta to the first parent
3411 * `parent-1`: add from a delta to the first parent
3374 * `parent-2`: add from a delta to the second parent if it exists
3412 * `parent-2`: add from a delta to the second parent if it exists
3375 (use a delta from the first parent otherwise)
3413 (use a delta from the first parent otherwise)
3376 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3414 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3377 * `storage`: add from the existing precomputed deltas
3415 * `storage`: add from the existing precomputed deltas
3378
3416
3379 Note: This performance command measures performance in a custom way. As a
3417 Note: This performance command measures performance in a custom way. As a
3380 result some of the global configuration of the 'perf' command does not
3418 result some of the global configuration of the 'perf' command does not
3381 apply to it:
3419 apply to it:
3382
3420
3383 * ``pre-run``: disabled
3421 * ``pre-run``: disabled
3384
3422
3385 * ``profile-benchmark``: disabled
3423 * ``profile-benchmark``: disabled
3386
3424
3387 * ``run-limits``: disabled use --count instead
3425 * ``run-limits``: disabled use --count instead
3388 """
3426 """
3389 opts = _byteskwargs(opts)
3427 opts = _byteskwargs(opts)
3390
3428
3391 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3429 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3392 rllen = getlen(ui)(rl)
3430 rllen = getlen(ui)(rl)
3393 if startrev < 0:
3431 if startrev < 0:
3394 startrev = rllen + startrev
3432 startrev = rllen + startrev
3395 if stoprev < 0:
3433 if stoprev < 0:
3396 stoprev = rllen + stoprev
3434 stoprev = rllen + stoprev
3397
3435
3398 lazydeltabase = opts['lazydeltabase']
3436 lazydeltabase = opts['lazydeltabase']
3399 source = opts['source']
3437 source = opts['source']
3400 clearcaches = opts['clear_caches']
3438 clearcaches = opts['clear_caches']
3401 validsource = (
3439 validsource = (
3402 b'full',
3440 b'full',
3403 b'parent-1',
3441 b'parent-1',
3404 b'parent-2',
3442 b'parent-2',
3405 b'parent-smallest',
3443 b'parent-smallest',
3406 b'storage',
3444 b'storage',
3407 )
3445 )
3408 if source not in validsource:
3446 if source not in validsource:
3409 raise error.Abort('invalid source type: %s' % source)
3447 raise error.Abort('invalid source type: %s' % source)
3410
3448
3411 ### actually gather results
3449 ### actually gather results
3412 count = opts['count']
3450 count = opts['count']
3413 if count <= 0:
3451 if count <= 0:
3414 raise error.Abort('invalide run count: %d' % count)
3452 raise error.Abort('invalide run count: %d' % count)
3415 allresults = []
3453 allresults = []
3416 for c in range(count):
3454 for c in range(count):
3417 timing = _timeonewrite(
3455 timing = _timeonewrite(
3418 ui,
3456 ui,
3419 rl,
3457 rl,
3420 source,
3458 source,
3421 startrev,
3459 startrev,
3422 stoprev,
3460 stoprev,
3423 c + 1,
3461 c + 1,
3424 lazydeltabase=lazydeltabase,
3462 lazydeltabase=lazydeltabase,
3425 clearcaches=clearcaches,
3463 clearcaches=clearcaches,
3426 )
3464 )
3427 allresults.append(timing)
3465 allresults.append(timing)
3428
3466
3429 ### consolidate the results in a single list
3467 ### consolidate the results in a single list
3430 results = []
3468 results = []
3431 for idx, (rev, t) in enumerate(allresults[0]):
3469 for idx, (rev, t) in enumerate(allresults[0]):
3432 ts = [t]
3470 ts = [t]
3433 for other in allresults[1:]:
3471 for other in allresults[1:]:
3434 orev, ot = other[idx]
3472 orev, ot = other[idx]
3435 assert orev == rev
3473 assert orev == rev
3436 ts.append(ot)
3474 ts.append(ot)
3437 results.append((rev, ts))
3475 results.append((rev, ts))
3438 resultcount = len(results)
3476 resultcount = len(results)
3439
3477
3440 ### Compute and display relevant statistics
3478 ### Compute and display relevant statistics
3441
3479
3442 # get a formatter
3480 # get a formatter
3443 fm = ui.formatter(b'perf', opts)
3481 fm = ui.formatter(b'perf', opts)
3444 displayall = ui.configbool(b"perf", b"all-timing", False)
3482 displayall = ui.configbool(b"perf", b"all-timing", False)
3445
3483
3446 # print individual details if requested
3484 # print individual details if requested
3447 if opts['details']:
3485 if opts['details']:
3448 for idx, item in enumerate(results, 1):
3486 for idx, item in enumerate(results, 1):
3449 rev, data = item
3487 rev, data = item
3450 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3488 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3451 formatone(fm, data, title=title, displayall=displayall)
3489 formatone(fm, data, title=title, displayall=displayall)
3452
3490
3453 # sorts results by median time
3491 # sorts results by median time
3454 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3492 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3455 # list of (name, index) to display)
3493 # list of (name, index) to display)
3456 relevants = [
3494 relevants = [
3457 ("min", 0),
3495 ("min", 0),
3458 ("10%", resultcount * 10 // 100),
3496 ("10%", resultcount * 10 // 100),
3459 ("25%", resultcount * 25 // 100),
3497 ("25%", resultcount * 25 // 100),
3460 ("50%", resultcount * 70 // 100),
3498 ("50%", resultcount * 70 // 100),
3461 ("75%", resultcount * 75 // 100),
3499 ("75%", resultcount * 75 // 100),
3462 ("90%", resultcount * 90 // 100),
3500 ("90%", resultcount * 90 // 100),
3463 ("95%", resultcount * 95 // 100),
3501 ("95%", resultcount * 95 // 100),
3464 ("99%", resultcount * 99 // 100),
3502 ("99%", resultcount * 99 // 100),
3465 ("99.9%", resultcount * 999 // 1000),
3503 ("99.9%", resultcount * 999 // 1000),
3466 ("99.99%", resultcount * 9999 // 10000),
3504 ("99.99%", resultcount * 9999 // 10000),
3467 ("99.999%", resultcount * 99999 // 100000),
3505 ("99.999%", resultcount * 99999 // 100000),
3468 ("max", -1),
3506 ("max", -1),
3469 ]
3507 ]
3470 if not ui.quiet:
3508 if not ui.quiet:
3471 for name, idx in relevants:
3509 for name, idx in relevants:
3472 data = results[idx]
3510 data = results[idx]
3473 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3511 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3474 formatone(fm, data[1], title=title, displayall=displayall)
3512 formatone(fm, data[1], title=title, displayall=displayall)
3475
3513
3476 # XXX summing that many float will not be very precise, we ignore this fact
3514 # XXX summing that many float will not be very precise, we ignore this fact
3477 # for now
3515 # for now
3478 totaltime = []
3516 totaltime = []
3479 for item in allresults:
3517 for item in allresults:
3480 totaltime.append(
3518 totaltime.append(
3481 (
3519 (
3482 sum(x[1][0] for x in item),
3520 sum(x[1][0] for x in item),
3483 sum(x[1][1] for x in item),
3521 sum(x[1][1] for x in item),
3484 sum(x[1][2] for x in item),
3522 sum(x[1][2] for x in item),
3485 )
3523 )
3486 )
3524 )
3487 formatone(
3525 formatone(
3488 fm,
3526 fm,
3489 totaltime,
3527 totaltime,
3490 title="total time (%d revs)" % resultcount,
3528 title="total time (%d revs)" % resultcount,
3491 displayall=displayall,
3529 displayall=displayall,
3492 )
3530 )
3493 fm.end()
3531 fm.end()
3494
3532
3495
3533
3496 class _faketr:
3534 class _faketr:
3497 def add(s, x, y, z=None):
3535 def add(s, x, y, z=None):
3498 return None
3536 return None
3499
3537
3500
3538
3501 def _timeonewrite(
3539 def _timeonewrite(
3502 ui,
3540 ui,
3503 orig,
3541 orig,
3504 source,
3542 source,
3505 startrev,
3543 startrev,
3506 stoprev,
3544 stoprev,
3507 runidx=None,
3545 runidx=None,
3508 lazydeltabase=True,
3546 lazydeltabase=True,
3509 clearcaches=True,
3547 clearcaches=True,
3510 ):
3548 ):
3511 timings = []
3549 timings = []
3512 tr = _faketr()
3550 tr = _faketr()
3513 with _temprevlog(ui, orig, startrev) as dest:
3551 with _temprevlog(ui, orig, startrev) as dest:
3514 dest._lazydeltabase = lazydeltabase
3552 dest._lazydeltabase = lazydeltabase
3515 revs = list(orig.revs(startrev, stoprev))
3553 revs = list(orig.revs(startrev, stoprev))
3516 total = len(revs)
3554 total = len(revs)
3517 topic = 'adding'
3555 topic = 'adding'
3518 if runidx is not None:
3556 if runidx is not None:
3519 topic += ' (run #%d)' % runidx
3557 topic += ' (run #%d)' % runidx
3520 # Support both old and new progress API
3558 # Support both old and new progress API
3521 if util.safehasattr(ui, 'makeprogress'):
3559 if util.safehasattr(ui, 'makeprogress'):
3522 progress = ui.makeprogress(topic, unit='revs', total=total)
3560 progress = ui.makeprogress(topic, unit='revs', total=total)
3523
3561
3524 def updateprogress(pos):
3562 def updateprogress(pos):
3525 progress.update(pos)
3563 progress.update(pos)
3526
3564
3527 def completeprogress():
3565 def completeprogress():
3528 progress.complete()
3566 progress.complete()
3529
3567
3530 else:
3568 else:
3531
3569
3532 def updateprogress(pos):
3570 def updateprogress(pos):
3533 ui.progress(topic, pos, unit='revs', total=total)
3571 ui.progress(topic, pos, unit='revs', total=total)
3534
3572
3535 def completeprogress():
3573 def completeprogress():
3536 ui.progress(topic, None, unit='revs', total=total)
3574 ui.progress(topic, None, unit='revs', total=total)
3537
3575
3538 for idx, rev in enumerate(revs):
3576 for idx, rev in enumerate(revs):
3539 updateprogress(idx)
3577 updateprogress(idx)
3540 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3578 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3541 if clearcaches:
3579 if clearcaches:
3542 dest.index.clearcaches()
3580 dest.index.clearcaches()
3543 dest.clearcaches()
3581 dest.clearcaches()
3544 with timeone() as r:
3582 with timeone() as r:
3545 dest.addrawrevision(*addargs, **addkwargs)
3583 dest.addrawrevision(*addargs, **addkwargs)
3546 timings.append((rev, r[0]))
3584 timings.append((rev, r[0]))
3547 updateprogress(total)
3585 updateprogress(total)
3548 completeprogress()
3586 completeprogress()
3549 return timings
3587 return timings
3550
3588
3551
3589
3552 def _getrevisionseed(orig, rev, tr, source):
3590 def _getrevisionseed(orig, rev, tr, source):
3553 from mercurial.node import nullid
3591 from mercurial.node import nullid
3554
3592
3555 linkrev = orig.linkrev(rev)
3593 linkrev = orig.linkrev(rev)
3556 node = orig.node(rev)
3594 node = orig.node(rev)
3557 p1, p2 = orig.parents(node)
3595 p1, p2 = orig.parents(node)
3558 flags = orig.flags(rev)
3596 flags = orig.flags(rev)
3559 cachedelta = None
3597 cachedelta = None
3560 text = None
3598 text = None
3561
3599
3562 if source == b'full':
3600 if source == b'full':
3563 text = orig.revision(rev)
3601 text = orig.revision(rev)
3564 elif source == b'parent-1':
3602 elif source == b'parent-1':
3565 baserev = orig.rev(p1)
3603 baserev = orig.rev(p1)
3566 cachedelta = (baserev, orig.revdiff(p1, rev))
3604 cachedelta = (baserev, orig.revdiff(p1, rev))
3567 elif source == b'parent-2':
3605 elif source == b'parent-2':
3568 parent = p2
3606 parent = p2
3569 if p2 == nullid:
3607 if p2 == nullid:
3570 parent = p1
3608 parent = p1
3571 baserev = orig.rev(parent)
3609 baserev = orig.rev(parent)
3572 cachedelta = (baserev, orig.revdiff(parent, rev))
3610 cachedelta = (baserev, orig.revdiff(parent, rev))
3573 elif source == b'parent-smallest':
3611 elif source == b'parent-smallest':
3574 p1diff = orig.revdiff(p1, rev)
3612 p1diff = orig.revdiff(p1, rev)
3575 parent = p1
3613 parent = p1
3576 diff = p1diff
3614 diff = p1diff
3577 if p2 != nullid:
3615 if p2 != nullid:
3578 p2diff = orig.revdiff(p2, rev)
3616 p2diff = orig.revdiff(p2, rev)
3579 if len(p1diff) > len(p2diff):
3617 if len(p1diff) > len(p2diff):
3580 parent = p2
3618 parent = p2
3581 diff = p2diff
3619 diff = p2diff
3582 baserev = orig.rev(parent)
3620 baserev = orig.rev(parent)
3583 cachedelta = (baserev, diff)
3621 cachedelta = (baserev, diff)
3584 elif source == b'storage':
3622 elif source == b'storage':
3585 baserev = orig.deltaparent(rev)
3623 baserev = orig.deltaparent(rev)
3586 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3624 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3587
3625
3588 return (
3626 return (
3589 (text, tr, linkrev, p1, p2),
3627 (text, tr, linkrev, p1, p2),
3590 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3628 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3591 )
3629 )
3592
3630
3593
3631
3594 @contextlib.contextmanager
3632 @contextlib.contextmanager
3595 def _temprevlog(ui, orig, truncaterev):
3633 def _temprevlog(ui, orig, truncaterev):
3596 from mercurial import vfs as vfsmod
3634 from mercurial import vfs as vfsmod
3597
3635
3598 if orig._inline:
3636 if orig._inline:
3599 raise error.Abort('not supporting inline revlog (yet)')
3637 raise error.Abort('not supporting inline revlog (yet)')
3600 revlogkwargs = {}
3638 revlogkwargs = {}
3601 k = 'upperboundcomp'
3639 k = 'upperboundcomp'
3602 if util.safehasattr(orig, k):
3640 if util.safehasattr(orig, k):
3603 revlogkwargs[k] = getattr(orig, k)
3641 revlogkwargs[k] = getattr(orig, k)
3604
3642
3605 indexfile = getattr(orig, '_indexfile', None)
3643 indexfile = getattr(orig, '_indexfile', None)
3606 if indexfile is None:
3644 if indexfile is None:
3607 # compatibility with <= hg-5.8
3645 # compatibility with <= hg-5.8
3608 indexfile = getattr(orig, 'indexfile')
3646 indexfile = getattr(orig, 'indexfile')
3609 origindexpath = orig.opener.join(indexfile)
3647 origindexpath = orig.opener.join(indexfile)
3610
3648
3611 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3649 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3612 origdatapath = orig.opener.join(datafile)
3650 origdatapath = orig.opener.join(datafile)
3613 radix = b'revlog'
3651 radix = b'revlog'
3614 indexname = b'revlog.i'
3652 indexname = b'revlog.i'
3615 dataname = b'revlog.d'
3653 dataname = b'revlog.d'
3616
3654
3617 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3655 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3618 try:
3656 try:
3619 # copy the data file in a temporary directory
3657 # copy the data file in a temporary directory
3620 ui.debug('copying data in %s\n' % tmpdir)
3658 ui.debug('copying data in %s\n' % tmpdir)
3621 destindexpath = os.path.join(tmpdir, 'revlog.i')
3659 destindexpath = os.path.join(tmpdir, 'revlog.i')
3622 destdatapath = os.path.join(tmpdir, 'revlog.d')
3660 destdatapath = os.path.join(tmpdir, 'revlog.d')
3623 shutil.copyfile(origindexpath, destindexpath)
3661 shutil.copyfile(origindexpath, destindexpath)
3624 shutil.copyfile(origdatapath, destdatapath)
3662 shutil.copyfile(origdatapath, destdatapath)
3625
3663
3626 # remove the data we want to add again
3664 # remove the data we want to add again
3627 ui.debug('truncating data to be rewritten\n')
3665 ui.debug('truncating data to be rewritten\n')
3628 with open(destindexpath, 'ab') as index:
3666 with open(destindexpath, 'ab') as index:
3629 index.seek(0)
3667 index.seek(0)
3630 index.truncate(truncaterev * orig._io.size)
3668 index.truncate(truncaterev * orig._io.size)
3631 with open(destdatapath, 'ab') as data:
3669 with open(destdatapath, 'ab') as data:
3632 data.seek(0)
3670 data.seek(0)
3633 data.truncate(orig.start(truncaterev))
3671 data.truncate(orig.start(truncaterev))
3634
3672
3635 # instantiate a new revlog from the temporary copy
3673 # instantiate a new revlog from the temporary copy
3636 ui.debug('truncating adding to be rewritten\n')
3674 ui.debug('truncating adding to be rewritten\n')
3637 vfs = vfsmod.vfs(tmpdir)
3675 vfs = vfsmod.vfs(tmpdir)
3638 vfs.options = getattr(orig.opener, 'options', None)
3676 vfs.options = getattr(orig.opener, 'options', None)
3639
3677
3640 try:
3678 try:
3641 dest = revlog(vfs, radix=radix, **revlogkwargs)
3679 dest = revlog(vfs, radix=radix, **revlogkwargs)
3642 except TypeError:
3680 except TypeError:
3643 dest = revlog(
3681 dest = revlog(
3644 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3682 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3645 )
3683 )
3646 if dest._inline:
3684 if dest._inline:
3647 raise error.Abort('not supporting inline revlog (yet)')
3685 raise error.Abort('not supporting inline revlog (yet)')
3648 # make sure internals are initialized
3686 # make sure internals are initialized
3649 dest.revision(len(dest) - 1)
3687 dest.revision(len(dest) - 1)
3650 yield dest
3688 yield dest
3651 del dest, vfs
3689 del dest, vfs
3652 finally:
3690 finally:
3653 shutil.rmtree(tmpdir, True)
3691 shutil.rmtree(tmpdir, True)
3654
3692
3655
3693
3656 @command(
3694 @command(
3657 b'perf::revlogchunks|perfrevlogchunks',
3695 b'perf::revlogchunks|perfrevlogchunks',
3658 revlogopts
3696 revlogopts
3659 + formatteropts
3697 + formatteropts
3660 + [
3698 + [
3661 (b'e', b'engines', b'', b'compression engines to use'),
3699 (b'e', b'engines', b'', b'compression engines to use'),
3662 (b's', b'startrev', 0, b'revision to start at'),
3700 (b's', b'startrev', 0, b'revision to start at'),
3663 ],
3701 ],
3664 b'-c|-m|FILE',
3702 b'-c|-m|FILE',
3665 )
3703 )
3666 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3704 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3667 """Benchmark operations on revlog chunks.
3705 """Benchmark operations on revlog chunks.
3668
3706
3669 Logically, each revlog is a collection of fulltext revisions. However,
3707 Logically, each revlog is a collection of fulltext revisions. However,
3670 stored within each revlog are "chunks" of possibly compressed data. This
3708 stored within each revlog are "chunks" of possibly compressed data. This
3671 data needs to be read and decompressed or compressed and written.
3709 data needs to be read and decompressed or compressed and written.
3672
3710
3673 This command measures the time it takes to read+decompress and recompress
3711 This command measures the time it takes to read+decompress and recompress
3674 chunks in a revlog. It effectively isolates I/O and compression performance.
3712 chunks in a revlog. It effectively isolates I/O and compression performance.
3675 For measurements of higher-level operations like resolving revisions,
3713 For measurements of higher-level operations like resolving revisions,
3676 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3714 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3677 """
3715 """
3678 opts = _byteskwargs(opts)
3716 opts = _byteskwargs(opts)
3679
3717
3680 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3718 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3681
3719
3682 # _chunkraw was renamed to _getsegmentforrevs.
3720 # _chunkraw was renamed to _getsegmentforrevs.
3683 try:
3721 try:
3684 segmentforrevs = rl._getsegmentforrevs
3722 segmentforrevs = rl._getsegmentforrevs
3685 except AttributeError:
3723 except AttributeError:
3686 segmentforrevs = rl._chunkraw
3724 segmentforrevs = rl._chunkraw
3687
3725
3688 # Verify engines argument.
3726 # Verify engines argument.
3689 if engines:
3727 if engines:
3690 engines = {e.strip() for e in engines.split(b',')}
3728 engines = {e.strip() for e in engines.split(b',')}
3691 for engine in engines:
3729 for engine in engines:
3692 try:
3730 try:
3693 util.compressionengines[engine]
3731 util.compressionengines[engine]
3694 except KeyError:
3732 except KeyError:
3695 raise error.Abort(b'unknown compression engine: %s' % engine)
3733 raise error.Abort(b'unknown compression engine: %s' % engine)
3696 else:
3734 else:
3697 engines = []
3735 engines = []
3698 for e in util.compengines:
3736 for e in util.compengines:
3699 engine = util.compengines[e]
3737 engine = util.compengines[e]
3700 try:
3738 try:
3701 if engine.available():
3739 if engine.available():
3702 engine.revlogcompressor().compress(b'dummy')
3740 engine.revlogcompressor().compress(b'dummy')
3703 engines.append(e)
3741 engines.append(e)
3704 except NotImplementedError:
3742 except NotImplementedError:
3705 pass
3743 pass
3706
3744
3707 revs = list(rl.revs(startrev, len(rl) - 1))
3745 revs = list(rl.revs(startrev, len(rl) - 1))
3708
3746
3709 def rlfh(rl):
3747 def rlfh(rl):
3710 if rl._inline:
3748 if rl._inline:
3711 indexfile = getattr(rl, '_indexfile', None)
3749 indexfile = getattr(rl, '_indexfile', None)
3712 if indexfile is None:
3750 if indexfile is None:
3713 # compatibility with <= hg-5.8
3751 # compatibility with <= hg-5.8
3714 indexfile = getattr(rl, 'indexfile')
3752 indexfile = getattr(rl, 'indexfile')
3715 return getsvfs(repo)(indexfile)
3753 return getsvfs(repo)(indexfile)
3716 else:
3754 else:
3717 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3755 datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
3718 return getsvfs(repo)(datafile)
3756 return getsvfs(repo)(datafile)
3719
3757
3720 def doread():
3758 def doread():
3721 rl.clearcaches()
3759 rl.clearcaches()
3722 for rev in revs:
3760 for rev in revs:
3723 segmentforrevs(rev, rev)
3761 segmentforrevs(rev, rev)
3724
3762
3725 def doreadcachedfh():
3763 def doreadcachedfh():
3726 rl.clearcaches()
3764 rl.clearcaches()
3727 fh = rlfh(rl)
3765 fh = rlfh(rl)
3728 for rev in revs:
3766 for rev in revs:
3729 segmentforrevs(rev, rev, df=fh)
3767 segmentforrevs(rev, rev, df=fh)
3730
3768
3731 def doreadbatch():
3769 def doreadbatch():
3732 rl.clearcaches()
3770 rl.clearcaches()
3733 segmentforrevs(revs[0], revs[-1])
3771 segmentforrevs(revs[0], revs[-1])
3734
3772
3735 def doreadbatchcachedfh():
3773 def doreadbatchcachedfh():
3736 rl.clearcaches()
3774 rl.clearcaches()
3737 fh = rlfh(rl)
3775 fh = rlfh(rl)
3738 segmentforrevs(revs[0], revs[-1], df=fh)
3776 segmentforrevs(revs[0], revs[-1], df=fh)
3739
3777
3740 def dochunk():
3778 def dochunk():
3741 rl.clearcaches()
3779 rl.clearcaches()
3742 fh = rlfh(rl)
3780 fh = rlfh(rl)
3743 for rev in revs:
3781 for rev in revs:
3744 rl._chunk(rev, df=fh)
3782 rl._chunk(rev, df=fh)
3745
3783
3746 chunks = [None]
3784 chunks = [None]
3747
3785
3748 def dochunkbatch():
3786 def dochunkbatch():
3749 rl.clearcaches()
3787 rl.clearcaches()
3750 fh = rlfh(rl)
3788 fh = rlfh(rl)
3751 # Save chunks as a side-effect.
3789 # Save chunks as a side-effect.
3752 chunks[0] = rl._chunks(revs, df=fh)
3790 chunks[0] = rl._chunks(revs, df=fh)
3753
3791
3754 def docompress(compressor):
3792 def docompress(compressor):
3755 rl.clearcaches()
3793 rl.clearcaches()
3756
3794
3757 try:
3795 try:
3758 # Swap in the requested compression engine.
3796 # Swap in the requested compression engine.
3759 oldcompressor = rl._compressor
3797 oldcompressor = rl._compressor
3760 rl._compressor = compressor
3798 rl._compressor = compressor
3761 for chunk in chunks[0]:
3799 for chunk in chunks[0]:
3762 rl.compress(chunk)
3800 rl.compress(chunk)
3763 finally:
3801 finally:
3764 rl._compressor = oldcompressor
3802 rl._compressor = oldcompressor
3765
3803
3766 benches = [
3804 benches = [
3767 (lambda: doread(), b'read'),
3805 (lambda: doread(), b'read'),
3768 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3806 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3769 (lambda: doreadbatch(), b'read batch'),
3807 (lambda: doreadbatch(), b'read batch'),
3770 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3808 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3771 (lambda: dochunk(), b'chunk'),
3809 (lambda: dochunk(), b'chunk'),
3772 (lambda: dochunkbatch(), b'chunk batch'),
3810 (lambda: dochunkbatch(), b'chunk batch'),
3773 ]
3811 ]
3774
3812
3775 for engine in sorted(engines):
3813 for engine in sorted(engines):
3776 compressor = util.compengines[engine].revlogcompressor()
3814 compressor = util.compengines[engine].revlogcompressor()
3777 benches.append(
3815 benches.append(
3778 (
3816 (
3779 functools.partial(docompress, compressor),
3817 functools.partial(docompress, compressor),
3780 b'compress w/ %s' % engine,
3818 b'compress w/ %s' % engine,
3781 )
3819 )
3782 )
3820 )
3783
3821
3784 for fn, title in benches:
3822 for fn, title in benches:
3785 timer, fm = gettimer(ui, opts)
3823 timer, fm = gettimer(ui, opts)
3786 timer(fn, title=title)
3824 timer(fn, title=title)
3787 fm.end()
3825 fm.end()
3788
3826
3789
3827
3790 @command(
3828 @command(
3791 b'perf::revlogrevision|perfrevlogrevision',
3829 b'perf::revlogrevision|perfrevlogrevision',
3792 revlogopts
3830 revlogopts
3793 + formatteropts
3831 + formatteropts
3794 + [(b'', b'cache', False, b'use caches instead of clearing')],
3832 + [(b'', b'cache', False, b'use caches instead of clearing')],
3795 b'-c|-m|FILE REV',
3833 b'-c|-m|FILE REV',
3796 )
3834 )
3797 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3835 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3798 """Benchmark obtaining a revlog revision.
3836 """Benchmark obtaining a revlog revision.
3799
3837
3800 Obtaining a revlog revision consists of roughly the following steps:
3838 Obtaining a revlog revision consists of roughly the following steps:
3801
3839
3802 1. Compute the delta chain
3840 1. Compute the delta chain
3803 2. Slice the delta chain if applicable
3841 2. Slice the delta chain if applicable
3804 3. Obtain the raw chunks for that delta chain
3842 3. Obtain the raw chunks for that delta chain
3805 4. Decompress each raw chunk
3843 4. Decompress each raw chunk
3806 5. Apply binary patches to obtain fulltext
3844 5. Apply binary patches to obtain fulltext
3807 6. Verify hash of fulltext
3845 6. Verify hash of fulltext
3808
3846
3809 This command measures the time spent in each of these phases.
3847 This command measures the time spent in each of these phases.
3810 """
3848 """
3811 opts = _byteskwargs(opts)
3849 opts = _byteskwargs(opts)
3812
3850
3813 if opts.get(b'changelog') or opts.get(b'manifest'):
3851 if opts.get(b'changelog') or opts.get(b'manifest'):
3814 file_, rev = None, file_
3852 file_, rev = None, file_
3815 elif rev is None:
3853 elif rev is None:
3816 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3854 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3817
3855
3818 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3856 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3819
3857
3820 # _chunkraw was renamed to _getsegmentforrevs.
3858 # _chunkraw was renamed to _getsegmentforrevs.
3821 try:
3859 try:
3822 segmentforrevs = r._getsegmentforrevs
3860 segmentforrevs = r._getsegmentforrevs
3823 except AttributeError:
3861 except AttributeError:
3824 segmentforrevs = r._chunkraw
3862 segmentforrevs = r._chunkraw
3825
3863
3826 node = r.lookup(rev)
3864 node = r.lookup(rev)
3827 rev = r.rev(node)
3865 rev = r.rev(node)
3828
3866
3829 def getrawchunks(data, chain):
3867 def getrawchunks(data, chain):
3830 start = r.start
3868 start = r.start
3831 length = r.length
3869 length = r.length
3832 inline = r._inline
3870 inline = r._inline
3833 try:
3871 try:
3834 iosize = r.index.entry_size
3872 iosize = r.index.entry_size
3835 except AttributeError:
3873 except AttributeError:
3836 iosize = r._io.size
3874 iosize = r._io.size
3837 buffer = util.buffer
3875 buffer = util.buffer
3838
3876
3839 chunks = []
3877 chunks = []
3840 ladd = chunks.append
3878 ladd = chunks.append
3841 for idx, item in enumerate(chain):
3879 for idx, item in enumerate(chain):
3842 offset = start(item[0])
3880 offset = start(item[0])
3843 bits = data[idx]
3881 bits = data[idx]
3844 for rev in item:
3882 for rev in item:
3845 chunkstart = start(rev)
3883 chunkstart = start(rev)
3846 if inline:
3884 if inline:
3847 chunkstart += (rev + 1) * iosize
3885 chunkstart += (rev + 1) * iosize
3848 chunklength = length(rev)
3886 chunklength = length(rev)
3849 ladd(buffer(bits, chunkstart - offset, chunklength))
3887 ladd(buffer(bits, chunkstart - offset, chunklength))
3850
3888
3851 return chunks
3889 return chunks
3852
3890
3853 def dodeltachain(rev):
3891 def dodeltachain(rev):
3854 if not cache:
3892 if not cache:
3855 r.clearcaches()
3893 r.clearcaches()
3856 r._deltachain(rev)
3894 r._deltachain(rev)
3857
3895
3858 def doread(chain):
3896 def doread(chain):
3859 if not cache:
3897 if not cache:
3860 r.clearcaches()
3898 r.clearcaches()
3861 for item in slicedchain:
3899 for item in slicedchain:
3862 segmentforrevs(item[0], item[-1])
3900 segmentforrevs(item[0], item[-1])
3863
3901
3864 def doslice(r, chain, size):
3902 def doslice(r, chain, size):
3865 for s in slicechunk(r, chain, targetsize=size):
3903 for s in slicechunk(r, chain, targetsize=size):
3866 pass
3904 pass
3867
3905
3868 def dorawchunks(data, chain):
3906 def dorawchunks(data, chain):
3869 if not cache:
3907 if not cache:
3870 r.clearcaches()
3908 r.clearcaches()
3871 getrawchunks(data, chain)
3909 getrawchunks(data, chain)
3872
3910
3873 def dodecompress(chunks):
3911 def dodecompress(chunks):
3874 decomp = r.decompress
3912 decomp = r.decompress
3875 for chunk in chunks:
3913 for chunk in chunks:
3876 decomp(chunk)
3914 decomp(chunk)
3877
3915
3878 def dopatch(text, bins):
3916 def dopatch(text, bins):
3879 if not cache:
3917 if not cache:
3880 r.clearcaches()
3918 r.clearcaches()
3881 mdiff.patches(text, bins)
3919 mdiff.patches(text, bins)
3882
3920
3883 def dohash(text):
3921 def dohash(text):
3884 if not cache:
3922 if not cache:
3885 r.clearcaches()
3923 r.clearcaches()
3886 r.checkhash(text, node, rev=rev)
3924 r.checkhash(text, node, rev=rev)
3887
3925
3888 def dorevision():
3926 def dorevision():
3889 if not cache:
3927 if not cache:
3890 r.clearcaches()
3928 r.clearcaches()
3891 r.revision(node)
3929 r.revision(node)
3892
3930
3893 try:
3931 try:
3894 from mercurial.revlogutils.deltas import slicechunk
3932 from mercurial.revlogutils.deltas import slicechunk
3895 except ImportError:
3933 except ImportError:
3896 slicechunk = getattr(revlog, '_slicechunk', None)
3934 slicechunk = getattr(revlog, '_slicechunk', None)
3897
3935
3898 size = r.length(rev)
3936 size = r.length(rev)
3899 chain = r._deltachain(rev)[0]
3937 chain = r._deltachain(rev)[0]
3900 if not getattr(r, '_withsparseread', False):
3938 if not getattr(r, '_withsparseread', False):
3901 slicedchain = (chain,)
3939 slicedchain = (chain,)
3902 else:
3940 else:
3903 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3941 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3904 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3942 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3905 rawchunks = getrawchunks(data, slicedchain)
3943 rawchunks = getrawchunks(data, slicedchain)
3906 bins = r._chunks(chain)
3944 bins = r._chunks(chain)
3907 text = bytes(bins[0])
3945 text = bytes(bins[0])
3908 bins = bins[1:]
3946 bins = bins[1:]
3909 text = mdiff.patches(text, bins)
3947 text = mdiff.patches(text, bins)
3910
3948
3911 benches = [
3949 benches = [
3912 (lambda: dorevision(), b'full'),
3950 (lambda: dorevision(), b'full'),
3913 (lambda: dodeltachain(rev), b'deltachain'),
3951 (lambda: dodeltachain(rev), b'deltachain'),
3914 (lambda: doread(chain), b'read'),
3952 (lambda: doread(chain), b'read'),
3915 ]
3953 ]
3916
3954
3917 if getattr(r, '_withsparseread', False):
3955 if getattr(r, '_withsparseread', False):
3918 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3956 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3919 benches.append(slicing)
3957 benches.append(slicing)
3920
3958
3921 benches.extend(
3959 benches.extend(
3922 [
3960 [
3923 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3961 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3924 (lambda: dodecompress(rawchunks), b'decompress'),
3962 (lambda: dodecompress(rawchunks), b'decompress'),
3925 (lambda: dopatch(text, bins), b'patch'),
3963 (lambda: dopatch(text, bins), b'patch'),
3926 (lambda: dohash(text), b'hash'),
3964 (lambda: dohash(text), b'hash'),
3927 ]
3965 ]
3928 )
3966 )
3929
3967
3930 timer, fm = gettimer(ui, opts)
3968 timer, fm = gettimer(ui, opts)
3931 for fn, title in benches:
3969 for fn, title in benches:
3932 timer(fn, title=title)
3970 timer(fn, title=title)
3933 fm.end()
3971 fm.end()
3934
3972
3935
3973
3936 @command(
3974 @command(
3937 b'perf::revset|perfrevset',
3975 b'perf::revset|perfrevset',
3938 [
3976 [
3939 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3977 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3940 (b'', b'contexts', False, b'obtain changectx for each revision'),
3978 (b'', b'contexts', False, b'obtain changectx for each revision'),
3941 ]
3979 ]
3942 + formatteropts,
3980 + formatteropts,
3943 b"REVSET",
3981 b"REVSET",
3944 )
3982 )
3945 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3983 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3946 """benchmark the execution time of a revset
3984 """benchmark the execution time of a revset
3947
3985
3948 Use the --clean option if need to evaluate the impact of build volatile
3986 Use the --clean option if need to evaluate the impact of build volatile
3949 revisions set cache on the revset execution. Volatile cache hold filtered
3987 revisions set cache on the revset execution. Volatile cache hold filtered
3950 and obsolete related cache."""
3988 and obsolete related cache."""
3951 opts = _byteskwargs(opts)
3989 opts = _byteskwargs(opts)
3952
3990
3953 timer, fm = gettimer(ui, opts)
3991 timer, fm = gettimer(ui, opts)
3954
3992
3955 def d():
3993 def d():
3956 if clear:
3994 if clear:
3957 repo.invalidatevolatilesets()
3995 repo.invalidatevolatilesets()
3958 if contexts:
3996 if contexts:
3959 for ctx in repo.set(expr):
3997 for ctx in repo.set(expr):
3960 pass
3998 pass
3961 else:
3999 else:
3962 for r in repo.revs(expr):
4000 for r in repo.revs(expr):
3963 pass
4001 pass
3964
4002
3965 timer(d)
4003 timer(d)
3966 fm.end()
4004 fm.end()
3967
4005
3968
4006
3969 @command(
4007 @command(
3970 b'perf::volatilesets|perfvolatilesets',
4008 b'perf::volatilesets|perfvolatilesets',
3971 [
4009 [
3972 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4010 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3973 ]
4011 ]
3974 + formatteropts,
4012 + formatteropts,
3975 )
4013 )
3976 def perfvolatilesets(ui, repo, *names, **opts):
4014 def perfvolatilesets(ui, repo, *names, **opts):
3977 """benchmark the computation of various volatile set
4015 """benchmark the computation of various volatile set
3978
4016
3979 Volatile set computes element related to filtering and obsolescence."""
4017 Volatile set computes element related to filtering and obsolescence."""
3980 opts = _byteskwargs(opts)
4018 opts = _byteskwargs(opts)
3981 timer, fm = gettimer(ui, opts)
4019 timer, fm = gettimer(ui, opts)
3982 repo = repo.unfiltered()
4020 repo = repo.unfiltered()
3983
4021
3984 def getobs(name):
4022 def getobs(name):
3985 def d():
4023 def d():
3986 repo.invalidatevolatilesets()
4024 repo.invalidatevolatilesets()
3987 if opts[b'clear_obsstore']:
4025 if opts[b'clear_obsstore']:
3988 clearfilecache(repo, b'obsstore')
4026 clearfilecache(repo, b'obsstore')
3989 obsolete.getrevs(repo, name)
4027 obsolete.getrevs(repo, name)
3990
4028
3991 return d
4029 return d
3992
4030
3993 allobs = sorted(obsolete.cachefuncs)
4031 allobs = sorted(obsolete.cachefuncs)
3994 if names:
4032 if names:
3995 allobs = [n for n in allobs if n in names]
4033 allobs = [n for n in allobs if n in names]
3996
4034
3997 for name in allobs:
4035 for name in allobs:
3998 timer(getobs(name), title=name)
4036 timer(getobs(name), title=name)
3999
4037
4000 def getfiltered(name):
4038 def getfiltered(name):
4001 def d():
4039 def d():
4002 repo.invalidatevolatilesets()
4040 repo.invalidatevolatilesets()
4003 if opts[b'clear_obsstore']:
4041 if opts[b'clear_obsstore']:
4004 clearfilecache(repo, b'obsstore')
4042 clearfilecache(repo, b'obsstore')
4005 repoview.filterrevs(repo, name)
4043 repoview.filterrevs(repo, name)
4006
4044
4007 return d
4045 return d
4008
4046
4009 allfilter = sorted(repoview.filtertable)
4047 allfilter = sorted(repoview.filtertable)
4010 if names:
4048 if names:
4011 allfilter = [n for n in allfilter if n in names]
4049 allfilter = [n for n in allfilter if n in names]
4012
4050
4013 for name in allfilter:
4051 for name in allfilter:
4014 timer(getfiltered(name), title=name)
4052 timer(getfiltered(name), title=name)
4015 fm.end()
4053 fm.end()
4016
4054
4017
4055
4018 @command(
4056 @command(
4019 b'perf::branchmap|perfbranchmap',
4057 b'perf::branchmap|perfbranchmap',
4020 [
4058 [
4021 (b'f', b'full', False, b'Includes build time of subset'),
4059 (b'f', b'full', False, b'Includes build time of subset'),
4022 (
4060 (
4023 b'',
4061 b'',
4024 b'clear-revbranch',
4062 b'clear-revbranch',
4025 False,
4063 False,
4026 b'purge the revbranch cache between computation',
4064 b'purge the revbranch cache between computation',
4027 ),
4065 ),
4028 ]
4066 ]
4029 + formatteropts,
4067 + formatteropts,
4030 )
4068 )
4031 def perfbranchmap(ui, repo, *filternames, **opts):
4069 def perfbranchmap(ui, repo, *filternames, **opts):
4032 """benchmark the update of a branchmap
4070 """benchmark the update of a branchmap
4033
4071
4034 This benchmarks the full repo.branchmap() call with read and write disabled
4072 This benchmarks the full repo.branchmap() call with read and write disabled
4035 """
4073 """
4036 opts = _byteskwargs(opts)
4074 opts = _byteskwargs(opts)
4037 full = opts.get(b"full", False)
4075 full = opts.get(b"full", False)
4038 clear_revbranch = opts.get(b"clear_revbranch", False)
4076 clear_revbranch = opts.get(b"clear_revbranch", False)
4039 timer, fm = gettimer(ui, opts)
4077 timer, fm = gettimer(ui, opts)
4040
4078
4041 def getbranchmap(filtername):
4079 def getbranchmap(filtername):
4042 """generate a benchmark function for the filtername"""
4080 """generate a benchmark function for the filtername"""
4043 if filtername is None:
4081 if filtername is None:
4044 view = repo
4082 view = repo
4045 else:
4083 else:
4046 view = repo.filtered(filtername)
4084 view = repo.filtered(filtername)
4047 if util.safehasattr(view._branchcaches, '_per_filter'):
4085 if util.safehasattr(view._branchcaches, '_per_filter'):
4048 filtered = view._branchcaches._per_filter
4086 filtered = view._branchcaches._per_filter
4049 else:
4087 else:
4050 # older versions
4088 # older versions
4051 filtered = view._branchcaches
4089 filtered = view._branchcaches
4052
4090
4053 def d():
4091 def d():
4054 if clear_revbranch:
4092 if clear_revbranch:
4055 repo.revbranchcache()._clear()
4093 repo.revbranchcache()._clear()
4056 if full:
4094 if full:
4057 view._branchcaches.clear()
4095 view._branchcaches.clear()
4058 else:
4096 else:
4059 filtered.pop(filtername, None)
4097 filtered.pop(filtername, None)
4060 view.branchmap()
4098 view.branchmap()
4061
4099
4062 return d
4100 return d
4063
4101
4064 # add filter in smaller subset to bigger subset
4102 # add filter in smaller subset to bigger subset
4065 possiblefilters = set(repoview.filtertable)
4103 possiblefilters = set(repoview.filtertable)
4066 if filternames:
4104 if filternames:
4067 possiblefilters &= set(filternames)
4105 possiblefilters &= set(filternames)
4068 subsettable = getbranchmapsubsettable()
4106 subsettable = getbranchmapsubsettable()
4069 allfilters = []
4107 allfilters = []
4070 while possiblefilters:
4108 while possiblefilters:
4071 for name in possiblefilters:
4109 for name in possiblefilters:
4072 subset = subsettable.get(name)
4110 subset = subsettable.get(name)
4073 if subset not in possiblefilters:
4111 if subset not in possiblefilters:
4074 break
4112 break
4075 else:
4113 else:
4076 assert False, b'subset cycle %s!' % possiblefilters
4114 assert False, b'subset cycle %s!' % possiblefilters
4077 allfilters.append(name)
4115 allfilters.append(name)
4078 possiblefilters.remove(name)
4116 possiblefilters.remove(name)
4079
4117
4080 # warm the cache
4118 # warm the cache
4081 if not full:
4119 if not full:
4082 for name in allfilters:
4120 for name in allfilters:
4083 repo.filtered(name).branchmap()
4121 repo.filtered(name).branchmap()
4084 if not filternames or b'unfiltered' in filternames:
4122 if not filternames or b'unfiltered' in filternames:
4085 # add unfiltered
4123 # add unfiltered
4086 allfilters.append(None)
4124 allfilters.append(None)
4087
4125
4088 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4126 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4089 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4127 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4090 branchcacheread.set(classmethod(lambda *args: None))
4128 branchcacheread.set(classmethod(lambda *args: None))
4091 else:
4129 else:
4092 # older versions
4130 # older versions
4093 branchcacheread = safeattrsetter(branchmap, b'read')
4131 branchcacheread = safeattrsetter(branchmap, b'read')
4094 branchcacheread.set(lambda *args: None)
4132 branchcacheread.set(lambda *args: None)
4095 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4133 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4096 branchcachewrite.set(lambda *args: None)
4134 branchcachewrite.set(lambda *args: None)
4097 try:
4135 try:
4098 for name in allfilters:
4136 for name in allfilters:
4099 printname = name
4137 printname = name
4100 if name is None:
4138 if name is None:
4101 printname = b'unfiltered'
4139 printname = b'unfiltered'
4102 timer(getbranchmap(name), title=printname)
4140 timer(getbranchmap(name), title=printname)
4103 finally:
4141 finally:
4104 branchcacheread.restore()
4142 branchcacheread.restore()
4105 branchcachewrite.restore()
4143 branchcachewrite.restore()
4106 fm.end()
4144 fm.end()
4107
4145
4108
4146
4109 @command(
4147 @command(
4110 b'perf::branchmapupdate|perfbranchmapupdate',
4148 b'perf::branchmapupdate|perfbranchmapupdate',
4111 [
4149 [
4112 (b'', b'base', [], b'subset of revision to start from'),
4150 (b'', b'base', [], b'subset of revision to start from'),
4113 (b'', b'target', [], b'subset of revision to end with'),
4151 (b'', b'target', [], b'subset of revision to end with'),
4114 (b'', b'clear-caches', False, b'clear cache between each runs'),
4152 (b'', b'clear-caches', False, b'clear cache between each runs'),
4115 ]
4153 ]
4116 + formatteropts,
4154 + formatteropts,
4117 )
4155 )
4118 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4156 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4119 """benchmark branchmap update from for <base> revs to <target> revs
4157 """benchmark branchmap update from for <base> revs to <target> revs
4120
4158
4121 If `--clear-caches` is passed, the following items will be reset before
4159 If `--clear-caches` is passed, the following items will be reset before
4122 each update:
4160 each update:
4123 * the changelog instance and associated indexes
4161 * the changelog instance and associated indexes
4124 * the rev-branch-cache instance
4162 * the rev-branch-cache instance
4125
4163
4126 Examples:
4164 Examples:
4127
4165
4128 # update for the one last revision
4166 # update for the one last revision
4129 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4167 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4130
4168
4131 $ update for change coming with a new branch
4169 $ update for change coming with a new branch
4132 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4170 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4133 """
4171 """
4134 from mercurial import branchmap
4172 from mercurial import branchmap
4135 from mercurial import repoview
4173 from mercurial import repoview
4136
4174
4137 opts = _byteskwargs(opts)
4175 opts = _byteskwargs(opts)
4138 timer, fm = gettimer(ui, opts)
4176 timer, fm = gettimer(ui, opts)
4139 clearcaches = opts[b'clear_caches']
4177 clearcaches = opts[b'clear_caches']
4140 unfi = repo.unfiltered()
4178 unfi = repo.unfiltered()
4141 x = [None] # used to pass data between closure
4179 x = [None] # used to pass data between closure
4142
4180
4143 # we use a `list` here to avoid possible side effect from smartset
4181 # we use a `list` here to avoid possible side effect from smartset
4144 baserevs = list(scmutil.revrange(repo, base))
4182 baserevs = list(scmutil.revrange(repo, base))
4145 targetrevs = list(scmutil.revrange(repo, target))
4183 targetrevs = list(scmutil.revrange(repo, target))
4146 if not baserevs:
4184 if not baserevs:
4147 raise error.Abort(b'no revisions selected for --base')
4185 raise error.Abort(b'no revisions selected for --base')
4148 if not targetrevs:
4186 if not targetrevs:
4149 raise error.Abort(b'no revisions selected for --target')
4187 raise error.Abort(b'no revisions selected for --target')
4150
4188
4151 # make sure the target branchmap also contains the one in the base
4189 # make sure the target branchmap also contains the one in the base
4152 targetrevs = list(set(baserevs) | set(targetrevs))
4190 targetrevs = list(set(baserevs) | set(targetrevs))
4153 targetrevs.sort()
4191 targetrevs.sort()
4154
4192
4155 cl = repo.changelog
4193 cl = repo.changelog
4156 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4194 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4157 allbaserevs.sort()
4195 allbaserevs.sort()
4158 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4196 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4159
4197
4160 newrevs = list(alltargetrevs.difference(allbaserevs))
4198 newrevs = list(alltargetrevs.difference(allbaserevs))
4161 newrevs.sort()
4199 newrevs.sort()
4162
4200
4163 allrevs = frozenset(unfi.changelog.revs())
4201 allrevs = frozenset(unfi.changelog.revs())
4164 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4202 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4165 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4203 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4166
4204
4167 def basefilter(repo, visibilityexceptions=None):
4205 def basefilter(repo, visibilityexceptions=None):
4168 return basefilterrevs
4206 return basefilterrevs
4169
4207
4170 def targetfilter(repo, visibilityexceptions=None):
4208 def targetfilter(repo, visibilityexceptions=None):
4171 return targetfilterrevs
4209 return targetfilterrevs
4172
4210
4173 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4211 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4174 ui.status(msg % (len(allbaserevs), len(newrevs)))
4212 ui.status(msg % (len(allbaserevs), len(newrevs)))
4175 if targetfilterrevs:
4213 if targetfilterrevs:
4176 msg = b'(%d revisions still filtered)\n'
4214 msg = b'(%d revisions still filtered)\n'
4177 ui.status(msg % len(targetfilterrevs))
4215 ui.status(msg % len(targetfilterrevs))
4178
4216
4179 try:
4217 try:
4180 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4218 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4181 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4219 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4182
4220
4183 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4221 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4184 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4222 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4185
4223
4186 # try to find an existing branchmap to reuse
4224 # try to find an existing branchmap to reuse
4187 subsettable = getbranchmapsubsettable()
4225 subsettable = getbranchmapsubsettable()
4188 candidatefilter = subsettable.get(None)
4226 candidatefilter = subsettable.get(None)
4189 while candidatefilter is not None:
4227 while candidatefilter is not None:
4190 candidatebm = repo.filtered(candidatefilter).branchmap()
4228 candidatebm = repo.filtered(candidatefilter).branchmap()
4191 if candidatebm.validfor(baserepo):
4229 if candidatebm.validfor(baserepo):
4192 filtered = repoview.filterrevs(repo, candidatefilter)
4230 filtered = repoview.filterrevs(repo, candidatefilter)
4193 missing = [r for r in allbaserevs if r in filtered]
4231 missing = [r for r in allbaserevs if r in filtered]
4194 base = candidatebm.copy()
4232 base = candidatebm.copy()
4195 base.update(baserepo, missing)
4233 base.update(baserepo, missing)
4196 break
4234 break
4197 candidatefilter = subsettable.get(candidatefilter)
4235 candidatefilter = subsettable.get(candidatefilter)
4198 else:
4236 else:
4199 # no suitable subset where found
4237 # no suitable subset where found
4200 base = branchmap.branchcache()
4238 base = branchmap.branchcache()
4201 base.update(baserepo, allbaserevs)
4239 base.update(baserepo, allbaserevs)
4202
4240
4203 def setup():
4241 def setup():
4204 x[0] = base.copy()
4242 x[0] = base.copy()
4205 if clearcaches:
4243 if clearcaches:
4206 unfi._revbranchcache = None
4244 unfi._revbranchcache = None
4207 clearchangelog(repo)
4245 clearchangelog(repo)
4208
4246
4209 def bench():
4247 def bench():
4210 x[0].update(targetrepo, newrevs)
4248 x[0].update(targetrepo, newrevs)
4211
4249
4212 timer(bench, setup=setup)
4250 timer(bench, setup=setup)
4213 fm.end()
4251 fm.end()
4214 finally:
4252 finally:
4215 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4253 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4216 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4254 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4217
4255
4218
4256
4219 @command(
4257 @command(
4220 b'perf::branchmapload|perfbranchmapload',
4258 b'perf::branchmapload|perfbranchmapload',
4221 [
4259 [
4222 (b'f', b'filter', b'', b'Specify repoview filter'),
4260 (b'f', b'filter', b'', b'Specify repoview filter'),
4223 (b'', b'list', False, b'List brachmap filter caches'),
4261 (b'', b'list', False, b'List brachmap filter caches'),
4224 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4262 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4225 ]
4263 ]
4226 + formatteropts,
4264 + formatteropts,
4227 )
4265 )
4228 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4266 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4229 """benchmark reading the branchmap"""
4267 """benchmark reading the branchmap"""
4230 opts = _byteskwargs(opts)
4268 opts = _byteskwargs(opts)
4231 clearrevlogs = opts[b'clear_revlogs']
4269 clearrevlogs = opts[b'clear_revlogs']
4232
4270
4233 if list:
4271 if list:
4234 for name, kind, st in repo.cachevfs.readdir(stat=True):
4272 for name, kind, st in repo.cachevfs.readdir(stat=True):
4235 if name.startswith(b'branch2'):
4273 if name.startswith(b'branch2'):
4236 filtername = name.partition(b'-')[2] or b'unfiltered'
4274 filtername = name.partition(b'-')[2] or b'unfiltered'
4237 ui.status(
4275 ui.status(
4238 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4276 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4239 )
4277 )
4240 return
4278 return
4241 if not filter:
4279 if not filter:
4242 filter = None
4280 filter = None
4243 subsettable = getbranchmapsubsettable()
4281 subsettable = getbranchmapsubsettable()
4244 if filter is None:
4282 if filter is None:
4245 repo = repo.unfiltered()
4283 repo = repo.unfiltered()
4246 else:
4284 else:
4247 repo = repoview.repoview(repo, filter)
4285 repo = repoview.repoview(repo, filter)
4248
4286
4249 repo.branchmap() # make sure we have a relevant, up to date branchmap
4287 repo.branchmap() # make sure we have a relevant, up to date branchmap
4250
4288
4251 try:
4289 try:
4252 fromfile = branchmap.branchcache.fromfile
4290 fromfile = branchmap.branchcache.fromfile
4253 except AttributeError:
4291 except AttributeError:
4254 # older versions
4292 # older versions
4255 fromfile = branchmap.read
4293 fromfile = branchmap.read
4256
4294
4257 currentfilter = filter
4295 currentfilter = filter
4258 # try once without timer, the filter may not be cached
4296 # try once without timer, the filter may not be cached
4259 while fromfile(repo) is None:
4297 while fromfile(repo) is None:
4260 currentfilter = subsettable.get(currentfilter)
4298 currentfilter = subsettable.get(currentfilter)
4261 if currentfilter is None:
4299 if currentfilter is None:
4262 raise error.Abort(
4300 raise error.Abort(
4263 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4301 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4264 )
4302 )
4265 repo = repo.filtered(currentfilter)
4303 repo = repo.filtered(currentfilter)
4266 timer, fm = gettimer(ui, opts)
4304 timer, fm = gettimer(ui, opts)
4267
4305
4268 def setup():
4306 def setup():
4269 if clearrevlogs:
4307 if clearrevlogs:
4270 clearchangelog(repo)
4308 clearchangelog(repo)
4271
4309
4272 def bench():
4310 def bench():
4273 fromfile(repo)
4311 fromfile(repo)
4274
4312
4275 timer(bench, setup=setup)
4313 timer(bench, setup=setup)
4276 fm.end()
4314 fm.end()
4277
4315
4278
4316
4279 @command(b'perf::loadmarkers|perfloadmarkers')
4317 @command(b'perf::loadmarkers|perfloadmarkers')
4280 def perfloadmarkers(ui, repo):
4318 def perfloadmarkers(ui, repo):
4281 """benchmark the time to parse the on-disk markers for a repo
4319 """benchmark the time to parse the on-disk markers for a repo
4282
4320
4283 Result is the number of markers in the repo."""
4321 Result is the number of markers in the repo."""
4284 timer, fm = gettimer(ui)
4322 timer, fm = gettimer(ui)
4285 svfs = getsvfs(repo)
4323 svfs = getsvfs(repo)
4286 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4324 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4287 fm.end()
4325 fm.end()
4288
4326
4289
4327
4290 @command(
4328 @command(
4291 b'perf::lrucachedict|perflrucachedict',
4329 b'perf::lrucachedict|perflrucachedict',
4292 formatteropts
4330 formatteropts
4293 + [
4331 + [
4294 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4332 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4295 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4333 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4296 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4334 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4297 (b'', b'size', 4, b'size of cache'),
4335 (b'', b'size', 4, b'size of cache'),
4298 (b'', b'gets', 10000, b'number of key lookups'),
4336 (b'', b'gets', 10000, b'number of key lookups'),
4299 (b'', b'sets', 10000, b'number of key sets'),
4337 (b'', b'sets', 10000, b'number of key sets'),
4300 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4338 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4301 (
4339 (
4302 b'',
4340 b'',
4303 b'mixedgetfreq',
4341 b'mixedgetfreq',
4304 50,
4342 50,
4305 b'frequency of get vs set ops in mixed mode',
4343 b'frequency of get vs set ops in mixed mode',
4306 ),
4344 ),
4307 ],
4345 ],
4308 norepo=True,
4346 norepo=True,
4309 )
4347 )
4310 def perflrucache(
4348 def perflrucache(
4311 ui,
4349 ui,
4312 mincost=0,
4350 mincost=0,
4313 maxcost=100,
4351 maxcost=100,
4314 costlimit=0,
4352 costlimit=0,
4315 size=4,
4353 size=4,
4316 gets=10000,
4354 gets=10000,
4317 sets=10000,
4355 sets=10000,
4318 mixed=10000,
4356 mixed=10000,
4319 mixedgetfreq=50,
4357 mixedgetfreq=50,
4320 **opts
4358 **opts
4321 ):
4359 ):
4322 opts = _byteskwargs(opts)
4360 opts = _byteskwargs(opts)
4323
4361
4324 def doinit():
4362 def doinit():
4325 for i in _xrange(10000):
4363 for i in _xrange(10000):
4326 util.lrucachedict(size)
4364 util.lrucachedict(size)
4327
4365
4328 costrange = list(range(mincost, maxcost + 1))
4366 costrange = list(range(mincost, maxcost + 1))
4329
4367
4330 values = []
4368 values = []
4331 for i in _xrange(size):
4369 for i in _xrange(size):
4332 values.append(random.randint(0, _maxint))
4370 values.append(random.randint(0, _maxint))
4333
4371
4334 # Get mode fills the cache and tests raw lookup performance with no
4372 # Get mode fills the cache and tests raw lookup performance with no
4335 # eviction.
4373 # eviction.
4336 getseq = []
4374 getseq = []
4337 for i in _xrange(gets):
4375 for i in _xrange(gets):
4338 getseq.append(random.choice(values))
4376 getseq.append(random.choice(values))
4339
4377
4340 def dogets():
4378 def dogets():
4341 d = util.lrucachedict(size)
4379 d = util.lrucachedict(size)
4342 for v in values:
4380 for v in values:
4343 d[v] = v
4381 d[v] = v
4344 for key in getseq:
4382 for key in getseq:
4345 value = d[key]
4383 value = d[key]
4346 value # silence pyflakes warning
4384 value # silence pyflakes warning
4347
4385
4348 def dogetscost():
4386 def dogetscost():
4349 d = util.lrucachedict(size, maxcost=costlimit)
4387 d = util.lrucachedict(size, maxcost=costlimit)
4350 for i, v in enumerate(values):
4388 for i, v in enumerate(values):
4351 d.insert(v, v, cost=costs[i])
4389 d.insert(v, v, cost=costs[i])
4352 for key in getseq:
4390 for key in getseq:
4353 try:
4391 try:
4354 value = d[key]
4392 value = d[key]
4355 value # silence pyflakes warning
4393 value # silence pyflakes warning
4356 except KeyError:
4394 except KeyError:
4357 pass
4395 pass
4358
4396
4359 # Set mode tests insertion speed with cache eviction.
4397 # Set mode tests insertion speed with cache eviction.
4360 setseq = []
4398 setseq = []
4361 costs = []
4399 costs = []
4362 for i in _xrange(sets):
4400 for i in _xrange(sets):
4363 setseq.append(random.randint(0, _maxint))
4401 setseq.append(random.randint(0, _maxint))
4364 costs.append(random.choice(costrange))
4402 costs.append(random.choice(costrange))
4365
4403
4366 def doinserts():
4404 def doinserts():
4367 d = util.lrucachedict(size)
4405 d = util.lrucachedict(size)
4368 for v in setseq:
4406 for v in setseq:
4369 d.insert(v, v)
4407 d.insert(v, v)
4370
4408
4371 def doinsertscost():
4409 def doinsertscost():
4372 d = util.lrucachedict(size, maxcost=costlimit)
4410 d = util.lrucachedict(size, maxcost=costlimit)
4373 for i, v in enumerate(setseq):
4411 for i, v in enumerate(setseq):
4374 d.insert(v, v, cost=costs[i])
4412 d.insert(v, v, cost=costs[i])
4375
4413
4376 def dosets():
4414 def dosets():
4377 d = util.lrucachedict(size)
4415 d = util.lrucachedict(size)
4378 for v in setseq:
4416 for v in setseq:
4379 d[v] = v
4417 d[v] = v
4380
4418
4381 # Mixed mode randomly performs gets and sets with eviction.
4419 # Mixed mode randomly performs gets and sets with eviction.
4382 mixedops = []
4420 mixedops = []
4383 for i in _xrange(mixed):
4421 for i in _xrange(mixed):
4384 r = random.randint(0, 100)
4422 r = random.randint(0, 100)
4385 if r < mixedgetfreq:
4423 if r < mixedgetfreq:
4386 op = 0
4424 op = 0
4387 else:
4425 else:
4388 op = 1
4426 op = 1
4389
4427
4390 mixedops.append(
4428 mixedops.append(
4391 (op, random.randint(0, size * 2), random.choice(costrange))
4429 (op, random.randint(0, size * 2), random.choice(costrange))
4392 )
4430 )
4393
4431
4394 def domixed():
4432 def domixed():
4395 d = util.lrucachedict(size)
4433 d = util.lrucachedict(size)
4396
4434
4397 for op, v, cost in mixedops:
4435 for op, v, cost in mixedops:
4398 if op == 0:
4436 if op == 0:
4399 try:
4437 try:
4400 d[v]
4438 d[v]
4401 except KeyError:
4439 except KeyError:
4402 pass
4440 pass
4403 else:
4441 else:
4404 d[v] = v
4442 d[v] = v
4405
4443
4406 def domixedcost():
4444 def domixedcost():
4407 d = util.lrucachedict(size, maxcost=costlimit)
4445 d = util.lrucachedict(size, maxcost=costlimit)
4408
4446
4409 for op, v, cost in mixedops:
4447 for op, v, cost in mixedops:
4410 if op == 0:
4448 if op == 0:
4411 try:
4449 try:
4412 d[v]
4450 d[v]
4413 except KeyError:
4451 except KeyError:
4414 pass
4452 pass
4415 else:
4453 else:
4416 d.insert(v, v, cost=cost)
4454 d.insert(v, v, cost=cost)
4417
4455
4418 benches = [
4456 benches = [
4419 (doinit, b'init'),
4457 (doinit, b'init'),
4420 ]
4458 ]
4421
4459
4422 if costlimit:
4460 if costlimit:
4423 benches.extend(
4461 benches.extend(
4424 [
4462 [
4425 (dogetscost, b'gets w/ cost limit'),
4463 (dogetscost, b'gets w/ cost limit'),
4426 (doinsertscost, b'inserts w/ cost limit'),
4464 (doinsertscost, b'inserts w/ cost limit'),
4427 (domixedcost, b'mixed w/ cost limit'),
4465 (domixedcost, b'mixed w/ cost limit'),
4428 ]
4466 ]
4429 )
4467 )
4430 else:
4468 else:
4431 benches.extend(
4469 benches.extend(
4432 [
4470 [
4433 (dogets, b'gets'),
4471 (dogets, b'gets'),
4434 (doinserts, b'inserts'),
4472 (doinserts, b'inserts'),
4435 (dosets, b'sets'),
4473 (dosets, b'sets'),
4436 (domixed, b'mixed'),
4474 (domixed, b'mixed'),
4437 ]
4475 ]
4438 )
4476 )
4439
4477
4440 for fn, title in benches:
4478 for fn, title in benches:
4441 timer, fm = gettimer(ui, opts)
4479 timer, fm = gettimer(ui, opts)
4442 timer(fn, title=title)
4480 timer(fn, title=title)
4443 fm.end()
4481 fm.end()
4444
4482
4445
4483
4446 @command(
4484 @command(
4447 b'perf::write|perfwrite',
4485 b'perf::write|perfwrite',
4448 formatteropts
4486 formatteropts
4449 + [
4487 + [
4450 (b'', b'write-method', b'write', b'ui write method'),
4488 (b'', b'write-method', b'write', b'ui write method'),
4451 (b'', b'nlines', 100, b'number of lines'),
4489 (b'', b'nlines', 100, b'number of lines'),
4452 (b'', b'nitems', 100, b'number of items (per line)'),
4490 (b'', b'nitems', 100, b'number of items (per line)'),
4453 (b'', b'item', b'x', b'item that is written'),
4491 (b'', b'item', b'x', b'item that is written'),
4454 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4492 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4455 (b'', b'flush-line', None, b'flush after each line'),
4493 (b'', b'flush-line', None, b'flush after each line'),
4456 ],
4494 ],
4457 )
4495 )
4458 def perfwrite(ui, repo, **opts):
4496 def perfwrite(ui, repo, **opts):
4459 """microbenchmark ui.write (and others)"""
4497 """microbenchmark ui.write (and others)"""
4460 opts = _byteskwargs(opts)
4498 opts = _byteskwargs(opts)
4461
4499
4462 write = getattr(ui, _sysstr(opts[b'write_method']))
4500 write = getattr(ui, _sysstr(opts[b'write_method']))
4463 nlines = int(opts[b'nlines'])
4501 nlines = int(opts[b'nlines'])
4464 nitems = int(opts[b'nitems'])
4502 nitems = int(opts[b'nitems'])
4465 item = opts[b'item']
4503 item = opts[b'item']
4466 batch_line = opts.get(b'batch_line')
4504 batch_line = opts.get(b'batch_line')
4467 flush_line = opts.get(b'flush_line')
4505 flush_line = opts.get(b'flush_line')
4468
4506
4469 if batch_line:
4507 if batch_line:
4470 line = item * nitems + b'\n'
4508 line = item * nitems + b'\n'
4471
4509
4472 def benchmark():
4510 def benchmark():
4473 for i in pycompat.xrange(nlines):
4511 for i in pycompat.xrange(nlines):
4474 if batch_line:
4512 if batch_line:
4475 write(line)
4513 write(line)
4476 else:
4514 else:
4477 for i in pycompat.xrange(nitems):
4515 for i in pycompat.xrange(nitems):
4478 write(item)
4516 write(item)
4479 write(b'\n')
4517 write(b'\n')
4480 if flush_line:
4518 if flush_line:
4481 ui.flush()
4519 ui.flush()
4482 ui.flush()
4520 ui.flush()
4483
4521
4484 timer, fm = gettimer(ui, opts)
4522 timer, fm = gettimer(ui, opts)
4485 timer(benchmark)
4523 timer(benchmark)
4486 fm.end()
4524 fm.end()
4487
4525
4488
4526
4489 def uisetup(ui):
4527 def uisetup(ui):
4490 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4528 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4491 commands, b'debugrevlogopts'
4529 commands, b'debugrevlogopts'
4492 ):
4530 ):
4493 # for "historical portability":
4531 # for "historical portability":
4494 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4532 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4495 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4533 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4496 # openrevlog() should cause failure, because it has been
4534 # openrevlog() should cause failure, because it has been
4497 # available since 3.5 (or 49c583ca48c4).
4535 # available since 3.5 (or 49c583ca48c4).
4498 def openrevlog(orig, repo, cmd, file_, opts):
4536 def openrevlog(orig, repo, cmd, file_, opts):
4499 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4537 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4500 raise error.Abort(
4538 raise error.Abort(
4501 b"This version doesn't support --dir option",
4539 b"This version doesn't support --dir option",
4502 hint=b"use 3.5 or later",
4540 hint=b"use 3.5 or later",
4503 )
4541 )
4504 return orig(repo, cmd, file_, opts)
4542 return orig(repo, cmd, file_, opts)
4505
4543
4506 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4544 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4507
4545
4508
4546
4509 @command(
4547 @command(
4510 b'perf::progress|perfprogress',
4548 b'perf::progress|perfprogress',
4511 formatteropts
4549 formatteropts
4512 + [
4550 + [
4513 (b'', b'topic', b'topic', b'topic for progress messages'),
4551 (b'', b'topic', b'topic', b'topic for progress messages'),
4514 (b'c', b'total', 1000000, b'total value we are progressing to'),
4552 (b'c', b'total', 1000000, b'total value we are progressing to'),
4515 ],
4553 ],
4516 norepo=True,
4554 norepo=True,
4517 )
4555 )
4518 def perfprogress(ui, topic=None, total=None, **opts):
4556 def perfprogress(ui, topic=None, total=None, **opts):
4519 """printing of progress bars"""
4557 """printing of progress bars"""
4520 opts = _byteskwargs(opts)
4558 opts = _byteskwargs(opts)
4521
4559
4522 timer, fm = gettimer(ui, opts)
4560 timer, fm = gettimer(ui, opts)
4523
4561
4524 def doprogress():
4562 def doprogress():
4525 with ui.makeprogress(topic, total=total) as progress:
4563 with ui.makeprogress(topic, total=total) as progress:
4526 for i in _xrange(total):
4564 for i in _xrange(total):
4527 progress.increment()
4565 progress.increment()
4528
4566
4529 timer(doprogress)
4567 timer(doprogress)
4530 fm.end()
4568 fm.end()
General Comments 0
You need to be logged in to leave comments. Login now