##// END OF EJS Templates
perf: document `perfdirstate`
marmoute -
r43391:ce315b1f default
parent child Browse files
Show More
@@ -1,3751 +1,3756
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16   number of seconds to wait before any group of runs (default: 1)
16   number of seconds to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19   number of runs to perform before starting measurement.
19   number of runs to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    """Return *a* unchanged.

    No-op fallback used by the py2/py3 compatibility shims below when a
    real conversion helper (e.g. pycompat.byteskwargs) is unavailable.
    """
    return a
125
125
126
126
127 try:
127 try:
128 from mercurial import pycompat
128 from mercurial import pycompat
129
129
130 getargspec = pycompat.getargspec # added to module after 4.5
130 getargspec = pycompat.getargspec # added to module after 4.5
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 if pycompat.ispy3:
136 if pycompat.ispy3:
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 else:
138 else:
139 _maxint = sys.maxint
139 _maxint = sys.maxint
140 except (NameError, ImportError, AttributeError):
140 except (NameError, ImportError, AttributeError):
141 import inspect
141 import inspect
142
142
143 getargspec = inspect.getargspec
143 getargspec = inspect.getargspec
144 _byteskwargs = identity
144 _byteskwargs = identity
145 _bytestr = str
145 _bytestr = str
146 fsencode = identity # no py3 support
146 fsencode = identity # no py3 support
147 _maxint = sys.maxint # no py3 support
147 _maxint = sys.maxint # no py3 support
148 _sysstr = lambda x: x # no py3 support
148 _sysstr = lambda x: x # no py3 support
149 _xrange = xrange
149 _xrange = xrange
150
150
151 try:
151 try:
152 # 4.7+
152 # 4.7+
153 queue = pycompat.queue.Queue
153 queue = pycompat.queue.Queue
154 except (NameError, AttributeError, ImportError):
154 except (NameError, AttributeError, ImportError):
155 # <4.7.
155 # <4.7.
156 try:
156 try:
157 queue = pycompat.queue
157 queue = pycompat.queue
158 except (NameError, AttributeError, ImportError):
158 except (NameError, AttributeError, ImportError):
159 import Queue as queue
159 import Queue as queue
160
160
161 try:
161 try:
162 from mercurial import logcmdutil
162 from mercurial import logcmdutil
163
163
164 makelogtemplater = logcmdutil.maketemplater
164 makelogtemplater = logcmdutil.maketemplater
165 except (AttributeError, ImportError):
165 except (AttributeError, ImportError):
166 try:
166 try:
167 makelogtemplater = cmdutil.makelogtemplater
167 makelogtemplater = cmdutil.makelogtemplater
168 except (AttributeError, ImportError):
168 except (AttributeError, ImportError):
169 makelogtemplater = None
169 makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
def safehasattr(thing, attr):
    """Tell whether *thing* has attribute *attr* (bytes name).

    Local replacement for "historical portability": util.safehasattr has
    only been available since 1.9.3 (or 94b200a11cf7).
    """
    missing = _undefined
    return getattr(thing, _sysstr(attr), missing) is not missing
179
179
180
180
181 setattr(util, 'safehasattr', safehasattr)
181 setattr(util, 'safehasattr', safehasattr)
182
182
183 # for "historical portability":
183 # for "historical portability":
184 # define util.timer forcibly, because util.timer has been available
184 # define util.timer forcibly, because util.timer has been available
185 # since ae5d60bb70c9
185 # since ae5d60bb70c9
186 if safehasattr(time, 'perf_counter'):
186 if safehasattr(time, 'perf_counter'):
187 util.timer = time.perf_counter
187 util.timer = time.perf_counter
188 elif os.name == b'nt':
188 elif os.name == b'nt':
189 util.timer = time.clock
189 util.timer = time.clock
190 else:
190 else:
191 util.timer = time.time
191 util.timer = time.time
192
192
193 # for "historical portability":
193 # for "historical portability":
194 # use locally defined empty option list, if formatteropts isn't
194 # use locally defined empty option list, if formatteropts isn't
195 # available, because commands.formatteropts has been available since
195 # available, because commands.formatteropts has been available since
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 # available since 2.2 (or ae5f92e154d3)
197 # available since 2.2 (or ae5f92e154d3)
198 formatteropts = getattr(
198 formatteropts = getattr(
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 )
200 )
201
201
202 # for "historical portability":
202 # for "historical portability":
203 # use locally defined option list, if debugrevlogopts isn't available,
203 # use locally defined option list, if debugrevlogopts isn't available,
204 # because commands.debugrevlogopts has been available since 3.7 (or
204 # because commands.debugrevlogopts has been available since 3.7 (or
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 # since 1.9 (or a79fea6b3e77).
206 # since 1.9 (or a79fea6b3e77).
207 revlogopts = getattr(
207 revlogopts = getattr(
208 cmdutil,
208 cmdutil,
209 "debugrevlogopts",
209 "debugrevlogopts",
210 getattr(
210 getattr(
211 commands,
211 commands,
212 "debugrevlogopts",
212 "debugrevlogopts",
213 [
213 [
214 (b'c', b'changelog', False, b'open changelog'),
214 (b'c', b'changelog', False, b'open changelog'),
215 (b'm', b'manifest', False, b'open manifest'),
215 (b'm', b'manifest', False, b'open manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
216 (b'', b'dir', False, b'open directory manifest'),
217 ],
217 ],
218 ),
218 ),
219 )
219 )
220
220
221 cmdtable = {}
221 cmdtable = {}
222
222
223 # for "historical portability":
223 # for "historical portability":
224 # define parsealiases locally, because cmdutil.parsealiases has been
224 # define parsealiases locally, because cmdutil.parsealiases has been
225 # available since 1.5 (or 6252852b4332)
225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b"name|alias1|alias2" command declaration into its names.

    Defined locally for "historical portability": cmdutil.parsealiases
    has only been available since 1.5 (or 6252852b4332).
    """
    return cmd.split(b"|")
228
228
229
229
# for "historical portability":
# pick whichever ``@command`` registration mechanism this Mercurial
# provides, preferring the modern registrar module.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending the aliases to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # register directly into the extension's cmdtable
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
# Declare the extension's config items when this Mercurial supports
# config registration (3.7+); silently skip on older versions.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    # registrar.configitem unavailable: pre-3.7 Mercurial
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (the ``experimental`` keyword was removed, so re-register without it)
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335
335
336
336
def getlen(ui):
    """Return a length function, honoring the ``perf.stub`` config.

    Under ``perf.stub`` every collection is reported as having a single
    element so stub runs stay cheap; otherwise the builtin len is used.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
341
341
342
342
class noop(object):
    """Context manager that intentionally does nothing.

    Stands in for a real profiler context when profiling is disabled,
    so benchmark loops can always use a ``with`` block.
    """

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None


# shared reusable instance; the class is stateless
NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # formatter is "plain": callers fall back to raw ui output
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # parse "<seconds>-<minruns>" pairs; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # only the first benchmarked iteration is profiled (see _timer)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """Execute *func* a single time without measuring it.

    Drop-in replacement for _timer used when the ``perf.stub`` config is
    set (useful for testing); *fm* and *title* are accepted only for
    signature compatibility and are ignored.
    """
    if setup is not None:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Time the enclosed block once.

    Yields a list that, after the ``with`` block exits, holds a single
    (wall, user, sys) tuple of elapsed seconds.
    """
    results = []
    os_before = os.times()
    wall_before = util.timer()
    yield results
    wall_after = util.timer()
    os_after = os.times()
    results.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
497
497
498
498
# list of stop condition (elapsed time, minimal run count)
# a benchmark stops at the first pair whose both thresholds are met,
# i.e. run at least 100 times unless 3s have passed, and at least 3
# times unless 10s have passed (see _timer and perf.run-limits)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func*, then report the timings through formatter *fm*.

    *setup* (if given) runs before every invocation of *func* but is not
    timed.  *prerun* extra untimed warm-up iterations are performed
    first.  *limits* is a sequence of (elapsed-seconds, min-run-count)
    stop conditions; iteration stops once any pair is satisfied.  Only
    the first measured iteration runs under *profiler* (if any).
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs: executed but never measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only profile the first iteration; swap in the no-op context
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter *fm*.

    ``timings`` is a list of (wall, user, sys) tuples and is sorted in
    place.  Only the best run is shown unless *displayall* is true, in
    which case max, average and median rows are added as well.
    """

    nruns = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, sample):
        # the "best" row historically uses unprefixed field names
        prefix = b'' if role == b'best' else b'%s.' % role
        wall, user, system = sample[0], sample[1], sample[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, nruns)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple(sum(col) / nruns for col in zip(*timings)))
        _show(b'median', timings[nruns // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read an integer config value, parsing the raw string ourselves.

    Kept local for "historical portability": ui.configint has only been
    available since 1.9 (or fa2b596db182).  Raises error.ConfigError on
    a non-integer value.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # small handle exposing set()/restore() over the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
def getbranchmapsubsettable():
    """Locate the branchmap ``subsettable`` mapping for this Mercurial.

    For "historical portability", ``subsettable`` has lived in several
    modules over time:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but the subsettable
    # attribute doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    For "historical portability": ``repo.svfs`` has only been available
    since 2.3 (or 7034365089bf); older versions expose ``repo.sopener``
    instead.
    """
    # fall back to the legacy attribute when svfs is absent or falsy
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg

    For "historical portability": ``repo.vfs`` has only been available
    since 2.3 (or 7034365089bf); older versions expose ``repo.opener``
    instead.
    """
    # fall back to the legacy attribute when vfs is absent or falsy
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        # resetting _tags to None is enough on these versions
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
def clearfilecache(obj, attrname):
    """Invalidate a @filecache'd attribute so it gets recomputed.

    Operates on the unfiltered repository when *obj* supports
    ``unfiltered()``, deleting both the cached instance attribute and
    the corresponding ``_filecache`` entry.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    # Drop the changelog caches so the next access reloads from disk.
    if repo is not repo.unfiltered():
        # repo is a filtered view: also reset its per-view changelog
        # cache attributes (bypassing any custom __setattr__).
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # time a full dirstate walk (unknown files included, ignored excluded)
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory's parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    # the summed lengths force full materialization of every status list
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
781
781
782
782
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole repository"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        # silence the UI during the runs; restored in the finally block
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry run so the benchmark never mutates the repository
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # newer addremove signature takes an explicit uipathfn
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
800
800
801
801
def clearcaches(cl):
    """Clear a revlog's lookup caches, across internal API versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs without clearcaches(): reset the node cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
811
811
812
812
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # reset caches before each run so every run computes from scratch
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
828
828
829
829
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    """benchmark the computation of the repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # optionally drop changelog/manifest caches so revlog reading
        # is included in the measurement
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
852
852
853
853
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over the ancestors of all changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the ancestor iterator; the work is in the iteration
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
866
866
867
867
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revisions in a heads ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        # build a fresh (lazy) ancestor set each run, then probe it
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()
882
882
883
883
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # NOTE(review): unlike sibling commands, opts is not run through
    # _byteskwargs here — confirm gettimer/hg.peer cope with str keys.
    # repos[1] is filled in by the setup function with a fresh peer
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # reconnect before each run so connection setup isn't timed twice
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
900
900
901
901
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmarks so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def d():
        # property access triggers the parse
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
924
924
925
925
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # each make* factory returns a zero-argument callable suitable for
    # timer(); the bundle is re-opened inside the callable so file I/O
    # is part of every timed run

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, without any bundle parsing, as a baseline
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the applicable benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1050
1050
1051
1051
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # consume the generated chunks; generation is what is measured
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1087
1087
1088
1088
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the computation of the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside of the timed runs
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory map so each run recomputes it
        del dirstate._map._dirs

    timer(d)
    fm.end()
1102
1102
1103
1103
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark the time necessary to load a dirstate from scratch

    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate outside of the timed runs
    b"a" in repo.dirstate

    def d():
        # invalidate so each run re-loads the dirstate from scratch
        repo.dirstate.invalidate()
        b"a" in repo.dirstate

    timer(d)
    fm.end()
1116
1121
1117
1122
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark the time to compute the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate outside of the timed runs
    b"a" in repo.dirstate

    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory map so each run recomputes it
        del repo.dirstate._map._dirs

    timer(d)
    fm.end()
1130
1135
1131
1136
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark the time to compute the dirstate's file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside of the timed runs
    b'a' in dirstate

    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached fold map so each run recomputes it
        del dirstate._map.filefoldmap

    timer(d)
    fm.end()
1145
1150
1146
1151
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark the time to compute the dirstate's directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate outside of the timed runs
    b'a' in dirstate

    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both cached maps so each run recomputes from scratch
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    timer(d)
    fm.end()
1161
1166
1162
1167
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing a dirty dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate outside of the timed runs
    b"a" in ds

    def d():
        # force a write even though nothing changed
        ds._dirty = True
        ds.write(repo.currenttransaction())

    timer(d)
    fm.end()
1176
1181
1177
1182
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # default to the common ancestor of the two merged revisions
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1199
1204
1200
1205
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run_calculate():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run_calculate)
    fm.end()
1232
1237
1233
1238
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run_mergecopies():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run_mergecopies)
    fm.end()
1256
1261
1257
1262
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints up front; only the tracing itself is timed
    sourcectx = scmutil.revsingle(repo, rev1, rev1)
    destctx = scmutil.revsingle(repo, rev2, rev2)

    timer(lambda: copies.pathcopies(sourcectx, destctx))
    fm.end()
1271
1276
1272
1277
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cached = repo._phasecache
    full = opts.get(b'full')

    def recompute():
        phasecache = cached
        if full:
            # drop the cached object entirely so file reading is timed too
            clearfilecache(repo, b'_phasecache')
            phasecache = repo._phasecache
        phasecache.invalidate()
        phasecache.loadphaserevs(repo)

    timer(recompute)
    fm.end()
1295
1300
1296
1301
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    # local imports: these modules are only needed by this command
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # resolve the destination like `hg push` would (default-push then default)
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, outside of the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # count remote roots that are known locally and not public (phase != 0)
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        # only the summary computation is benchmarked; all network traffic
        # happened above
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1353
1358
1354
1359
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; derive its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full 40-char hex node of the manifest itself
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # `getstorage` only exists on newer Mercurial versions;
                # fall back to the private revlog attribute on older ones
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # clear caches each run so the read is always cold
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1398
1403
1399
1404
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def read_changeset():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(read_changeset)
    fm.end()
1412
1417
1413
1418
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def reset_ignore():
        # drop any cached ignore matcher so each run rebuilds it from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load_ignore():
        dirstate._ignore

    timer(load_ignore, setup=reset_ignore, title=b"load")
    fm.end()
1430
1435
1431
1436
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # BUG FIX: `_byteskwargs` converted every key to bytes, so the
        # previous str keys (opts['rev'], str Abort message) raised
        # KeyError/TypeError on Python 3 instead of reporting the conflict.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1493
1498
1494
1499
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # BUG FIX: after `_byteskwargs` all keys are bytes; the previous str key
    # opts['clear_caches'] raised KeyError on Python 3.  The Abort message is
    # also made bytes for consistency with the rest of the codebase.
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1561
1566
1562
1567
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup cost of running the `hg` executable"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def launch_hg():
        if os.name == r'nt':
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            # neutralize HGRCPATH so user configuration cannot skew the run
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )

    timer(launch_hg)
    fm.end()
1579
1584
1580
1585
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def fetch_parents():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(fetch_parents)
    fm.end()
1606
1611
1607
1612
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark fetching the file list of a changeset through the context"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    timer(lambda: len(repo[rev].files()))
    fm.end()
1619
1624
1620
1625
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark fetching a changeset's file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    # index 3 of a changelog entry is the list of touched files
    timer(lambda: len(changelog.read(rev)[3]))
    fm.end()
1633
1638
1634
1639
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def lookup():
        len(repo.lookup(rev))

    timer(lookup)
    fm.end()
1641
1646
1642
1647
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a reproducible stream of edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the edit stream below is deterministic across runs.
    # NOTE: the randint() call order must not change or results differ.
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def apply_edits():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(apply_edits)
    fm.end()
1680
1685
1681
1686
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve_specs():
        return len(scmutil.revrange(repo, specs))

    timer(resolve_specs)
    fm.end()
1689
1694
1690
1695
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node lookup in a changelog with cold caches"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    changelog = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def cold_lookup():
        changelog.rev(node)
        # wipe the caches so the next run starts cold again
        clearcaches(changelog)

    timer(cold_lookup)
    fm.end()
1707
1712
1708
1713
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running the `log` command"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the command's output so printing cost stays out of the result
    ui.pushbuffer()

    def run_log():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run_log)
    ui.popbuffer()
    fm.end()
1726
1731
1727
1732
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walk_backwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(walk_backwards)
    fm.end()
1744
1749
1745
1750
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a silenced ui so terminal output does not skew timings
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render_all():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render_all)
    fm.end()
1785
1790
1786
1791
1787 def _displaystats(ui, opts, entries, data):
1792 def _displaystats(ui, opts, entries, data):
1788 pass
1793 pass
1789 # use a second formatter because the data are quite different, not sure
1794 # use a second formatter because the data are quite different, not sure
1790 # how it flies with the templater.
1795 # how it flies with the templater.
1791 fm = ui.formatter(b'perf-stats', opts)
1796 fm = ui.formatter(b'perf-stats', opts)
1792 for key, title in entries:
1797 for key, title in entries:
1793 values = data[key]
1798 values = data[key]
1794 nbvalues = len(data)
1799 nbvalues = len(data)
1795 values.sort()
1800 values.sort()
1796 stats = {
1801 stats = {
1797 'key': key,
1802 'key': key,
1798 'title': title,
1803 'title': title,
1799 'nbitems': len(values),
1804 'nbitems': len(values),
1800 'min': values[0][0],
1805 'min': values[0][0],
1801 '10%': values[(nbvalues * 10) // 100][0],
1806 '10%': values[(nbvalues * 10) // 100][0],
1802 '25%': values[(nbvalues * 25) // 100][0],
1807 '25%': values[(nbvalues * 25) // 100][0],
1803 '50%': values[(nbvalues * 50) // 100][0],
1808 '50%': values[(nbvalues * 50) // 100][0],
1804 '75%': values[(nbvalues * 75) // 100][0],
1809 '75%': values[(nbvalues * 75) // 100][0],
1805 '80%': values[(nbvalues * 80) // 100][0],
1810 '80%': values[(nbvalues * 80) // 100][0],
1806 '85%': values[(nbvalues * 85) // 100][0],
1811 '85%': values[(nbvalues * 85) // 100][0],
1807 '90%': values[(nbvalues * 90) // 100][0],
1812 '90%': values[(nbvalues * 90) // 100][0],
1808 '95%': values[(nbvalues * 95) // 100][0],
1813 '95%': values[(nbvalues * 95) // 100][0],
1809 '99%': values[(nbvalues * 99) // 100][0],
1814 '99%': values[(nbvalues * 99) // 100][0],
1810 'max': values[-1][0],
1815 'max': values[-1][0],
1811 }
1816 }
1812 fm.startitem()
1817 fm.startitem()
1813 fm.data(**stats)
1818 fm.data(**stats)
1814 # make node pretty for the human output
1819 # make node pretty for the human output
1815 fm.plain('### %s (%d items)\n' % (title, len(values)))
1820 fm.plain('### %s (%d items)\n' % (title, len(values)))
1816 lines = [
1821 lines = [
1817 'min',
1822 'min',
1818 '10%',
1823 '10%',
1819 '25%',
1824 '25%',
1820 '50%',
1825 '50%',
1821 '75%',
1826 '75%',
1822 '80%',
1827 '80%',
1823 '85%',
1828 '85%',
1824 '90%',
1829 '90%',
1825 '95%',
1830 '95%',
1826 '99%',
1831 '99%',
1827 'max',
1832 'max',
1828 ]
1833 ]
1829 for l in lines:
1834 for l in lines:
1830 fm.plain('%s: %s\n' % (l, stats[l]))
1835 fm.plain('%s: %s\n' % (l, stats[l]))
1831 fm.end()
1836 fm.end()
1832
1837
1833
1838
1834 @command(
1839 @command(
1835 b'perfhelper-mergecopies',
1840 b'perfhelper-mergecopies',
1836 formatteropts
1841 formatteropts
1837 + [
1842 + [
1838 (b'r', b'revs', [], b'restrict search to these revisions'),
1843 (b'r', b'revs', [], b'restrict search to these revisions'),
1839 (b'', b'timing', False, b'provides extra data (costly)'),
1844 (b'', b'timing', False, b'provides extra data (costly)'),
1840 (b'', b'stats', False, b'provides statistic about the measured data'),
1845 (b'', b'stats', False, b'provides statistic about the measured data'),
1841 ],
1846 ],
1842 )
1847 )
1843 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1848 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1844 """find statistics about potential parameters for `perfmergecopies`
1849 """find statistics about potential parameters for `perfmergecopies`
1845
1850
1846 This command find (base, p1, p2) triplet relevant for copytracing
1851 This command find (base, p1, p2) triplet relevant for copytracing
1847 benchmarking in the context of a merge. It reports values for some of the
1852 benchmarking in the context of a merge. It reports values for some of the
1848 parameters that impact merge copy tracing time during merge.
1853 parameters that impact merge copy tracing time during merge.
1849
1854
1850 If `--timing` is set, rename detection is run and the associated timing
1855 If `--timing` is set, rename detection is run and the associated timing
1851 will be reported. The extra details come at the cost of slower command
1856 will be reported. The extra details come at the cost of slower command
1852 execution.
1857 execution.
1853
1858
1854 Since rename detection is only run once, other factors might easily
1859 Since rename detection is only run once, other factors might easily
1855 affect the precision of the timing. However it should give a good
1860 affect the precision of the timing. However it should give a good
1856 approximation of which revision triplets are very costly.
1861 approximation of which revision triplets are very costly.
1857 """
1862 """
1858 opts = _byteskwargs(opts)
1863 opts = _byteskwargs(opts)
1859 fm = ui.formatter(b'perf', opts)
1864 fm = ui.formatter(b'perf', opts)
1860 dotiming = opts[b'timing']
1865 dotiming = opts[b'timing']
1861 dostats = opts[b'stats']
1866 dostats = opts[b'stats']
1862
1867
1863 output_template = [
1868 output_template = [
1864 ("base", "%(base)12s"),
1869 ("base", "%(base)12s"),
1865 ("p1", "%(p1.node)12s"),
1870 ("p1", "%(p1.node)12s"),
1866 ("p2", "%(p2.node)12s"),
1871 ("p2", "%(p2.node)12s"),
1867 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1872 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1868 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1873 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1869 ("p1.renames", "%(p1.renamedfiles)12d"),
1874 ("p1.renames", "%(p1.renamedfiles)12d"),
1870 ("p1.time", "%(p1.time)12.3f"),
1875 ("p1.time", "%(p1.time)12.3f"),
1871 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1876 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1872 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1877 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1873 ("p2.renames", "%(p2.renamedfiles)12d"),
1878 ("p2.renames", "%(p2.renamedfiles)12d"),
1874 ("p2.time", "%(p2.time)12.3f"),
1879 ("p2.time", "%(p2.time)12.3f"),
1875 ("renames", "%(nbrenamedfiles)12d"),
1880 ("renames", "%(nbrenamedfiles)12d"),
1876 ("total.time", "%(time)12.3f"),
1881 ("total.time", "%(time)12.3f"),
1877 ]
1882 ]
1878 if not dotiming:
1883 if not dotiming:
1879 output_template = [
1884 output_template = [
1880 i
1885 i
1881 for i in output_template
1886 for i in output_template
1882 if not ('time' in i[0] or 'renames' in i[0])
1887 if not ('time' in i[0] or 'renames' in i[0])
1883 ]
1888 ]
1884 header_names = [h for (h, v) in output_template]
1889 header_names = [h for (h, v) in output_template]
1885 output = ' '.join([v for (h, v) in output_template]) + '\n'
1890 output = ' '.join([v for (h, v) in output_template]) + '\n'
1886 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1891 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1887 fm.plain(header % tuple(header_names))
1892 fm.plain(header % tuple(header_names))
1888
1893
1889 if not revs:
1894 if not revs:
1890 revs = ['all()']
1895 revs = ['all()']
1891 revs = scmutil.revrange(repo, revs)
1896 revs = scmutil.revrange(repo, revs)
1892
1897
1893 if dostats:
1898 if dostats:
1894 alldata = {
1899 alldata = {
1895 'nbrevs': [],
1900 'nbrevs': [],
1896 'nbmissingfiles': [],
1901 'nbmissingfiles': [],
1897 }
1902 }
1898 if dotiming:
1903 if dotiming:
1899 alldata['parentnbrenames'] = []
1904 alldata['parentnbrenames'] = []
1900 alldata['totalnbrenames'] = []
1905 alldata['totalnbrenames'] = []
1901 alldata['parenttime'] = []
1906 alldata['parenttime'] = []
1902 alldata['totaltime'] = []
1907 alldata['totaltime'] = []
1903
1908
1904 roi = repo.revs('merge() and %ld', revs)
1909 roi = repo.revs('merge() and %ld', revs)
1905 for r in roi:
1910 for r in roi:
1906 ctx = repo[r]
1911 ctx = repo[r]
1907 p1 = ctx.p1()
1912 p1 = ctx.p1()
1908 p2 = ctx.p2()
1913 p2 = ctx.p2()
1909 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1914 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1910 for b in bases:
1915 for b in bases:
1911 b = repo[b]
1916 b = repo[b]
1912 p1missing = copies._computeforwardmissing(b, p1)
1917 p1missing = copies._computeforwardmissing(b, p1)
1913 p2missing = copies._computeforwardmissing(b, p2)
1918 p2missing = copies._computeforwardmissing(b, p2)
1914 data = {
1919 data = {
1915 b'base': b.hex(),
1920 b'base': b.hex(),
1916 b'p1.node': p1.hex(),
1921 b'p1.node': p1.hex(),
1917 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1922 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1918 b'p1.nbmissingfiles': len(p1missing),
1923 b'p1.nbmissingfiles': len(p1missing),
1919 b'p2.node': p2.hex(),
1924 b'p2.node': p2.hex(),
1920 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1925 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1921 b'p2.nbmissingfiles': len(p2missing),
1926 b'p2.nbmissingfiles': len(p2missing),
1922 }
1927 }
1923 if dostats:
1928 if dostats:
1924 if p1missing:
1929 if p1missing:
1925 alldata['nbrevs'].append(
1930 alldata['nbrevs'].append(
1926 (data['p1.nbrevs'], b.hex(), p1.hex())
1931 (data['p1.nbrevs'], b.hex(), p1.hex())
1927 )
1932 )
1928 alldata['nbmissingfiles'].append(
1933 alldata['nbmissingfiles'].append(
1929 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
1934 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
1930 )
1935 )
1931 if p2missing:
1936 if p2missing:
1932 alldata['nbrevs'].append(
1937 alldata['nbrevs'].append(
1933 (data['p2.nbrevs'], b.hex(), p2.hex())
1938 (data['p2.nbrevs'], b.hex(), p2.hex())
1934 )
1939 )
1935 alldata['nbmissingfiles'].append(
1940 alldata['nbmissingfiles'].append(
1936 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
1941 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
1937 )
1942 )
1938 if dotiming:
1943 if dotiming:
1939 begin = util.timer()
1944 begin = util.timer()
1940 mergedata = copies.mergecopies(repo, p1, p2, b)
1945 mergedata = copies.mergecopies(repo, p1, p2, b)
1941 end = util.timer()
1946 end = util.timer()
1942 # not very stable timing since we did only one run
1947 # not very stable timing since we did only one run
1943 data['time'] = end - begin
1948 data['time'] = end - begin
1944 # mergedata contains five dicts: "copy", "movewithdir",
1949 # mergedata contains five dicts: "copy", "movewithdir",
1945 # "diverge", "renamedelete" and "dirmove".
1950 # "diverge", "renamedelete" and "dirmove".
1946 # The first 4 are about renamed file so lets count that.
1951 # The first 4 are about renamed file so lets count that.
1947 renames = len(mergedata[0])
1952 renames = len(mergedata[0])
1948 renames += len(mergedata[1])
1953 renames += len(mergedata[1])
1949 renames += len(mergedata[2])
1954 renames += len(mergedata[2])
1950 renames += len(mergedata[3])
1955 renames += len(mergedata[3])
1951 data['nbrenamedfiles'] = renames
1956 data['nbrenamedfiles'] = renames
1952 begin = util.timer()
1957 begin = util.timer()
1953 p1renames = copies.pathcopies(b, p1)
1958 p1renames = copies.pathcopies(b, p1)
1954 end = util.timer()
1959 end = util.timer()
1955 data['p1.time'] = end - begin
1960 data['p1.time'] = end - begin
1956 begin = util.timer()
1961 begin = util.timer()
1957 p2renames = copies.pathcopies(b, p2)
1962 p2renames = copies.pathcopies(b, p2)
1958 data['p2.time'] = end - begin
1963 data['p2.time'] = end - begin
1959 end = util.timer()
1964 end = util.timer()
1960 data['p1.renamedfiles'] = len(p1renames)
1965 data['p1.renamedfiles'] = len(p1renames)
1961 data['p2.renamedfiles'] = len(p2renames)
1966 data['p2.renamedfiles'] = len(p2renames)
1962
1967
1963 if dostats:
1968 if dostats:
1964 if p1missing:
1969 if p1missing:
1965 alldata['parentnbrenames'].append(
1970 alldata['parentnbrenames'].append(
1966 (data['p1.renamedfiles'], b.hex(), p1.hex())
1971 (data['p1.renamedfiles'], b.hex(), p1.hex())
1967 )
1972 )
1968 alldata['parenttime'].append(
1973 alldata['parenttime'].append(
1969 (data['p1.time'], b.hex(), p1.hex())
1974 (data['p1.time'], b.hex(), p1.hex())
1970 )
1975 )
1971 if p2missing:
1976 if p2missing:
1972 alldata['parentnbrenames'].append(
1977 alldata['parentnbrenames'].append(
1973 (data['p2.renamedfiles'], b.hex(), p2.hex())
1978 (data['p2.renamedfiles'], b.hex(), p2.hex())
1974 )
1979 )
1975 alldata['parenttime'].append(
1980 alldata['parenttime'].append(
1976 (data['p2.time'], b.hex(), p2.hex())
1981 (data['p2.time'], b.hex(), p2.hex())
1977 )
1982 )
1978 if p1missing or p2missing:
1983 if p1missing or p2missing:
1979 alldata['totalnbrenames'].append(
1984 alldata['totalnbrenames'].append(
1980 (
1985 (
1981 data['nbrenamedfiles'],
1986 data['nbrenamedfiles'],
1982 b.hex(),
1987 b.hex(),
1983 p1.hex(),
1988 p1.hex(),
1984 p2.hex(),
1989 p2.hex(),
1985 )
1990 )
1986 )
1991 )
1987 alldata['totaltime'].append(
1992 alldata['totaltime'].append(
1988 (data['time'], b.hex(), p1.hex(), p2.hex())
1993 (data['time'], b.hex(), p1.hex(), p2.hex())
1989 )
1994 )
1990 fm.startitem()
1995 fm.startitem()
1991 fm.data(**data)
1996 fm.data(**data)
1992 # make node pretty for the human output
1997 # make node pretty for the human output
1993 out = data.copy()
1998 out = data.copy()
1994 out['base'] = fm.hexfunc(b.node())
1999 out['base'] = fm.hexfunc(b.node())
1995 out['p1.node'] = fm.hexfunc(p1.node())
2000 out['p1.node'] = fm.hexfunc(p1.node())
1996 out['p2.node'] = fm.hexfunc(p2.node())
2001 out['p2.node'] = fm.hexfunc(p2.node())
1997 fm.plain(output % out)
2002 fm.plain(output % out)
1998
2003
1999 fm.end()
2004 fm.end()
2000 if dostats:
2005 if dostats:
2001 # use a second formatter because the data are quite different, not sure
2006 # use a second formatter because the data are quite different, not sure
2002 # how it flies with the templater.
2007 # how it flies with the templater.
2003 entries = [
2008 entries = [
2004 ('nbrevs', 'number of revision covered'),
2009 ('nbrevs', 'number of revision covered'),
2005 ('nbmissingfiles', 'number of missing files at head'),
2010 ('nbmissingfiles', 'number of missing files at head'),
2006 ]
2011 ]
2007 if dotiming:
2012 if dotiming:
2008 entries.append(
2013 entries.append(
2009 ('parentnbrenames', 'rename from one parent to base')
2014 ('parentnbrenames', 'rename from one parent to base')
2010 )
2015 )
2011 entries.append(('totalnbrenames', 'total number of renames'))
2016 entries.append(('totalnbrenames', 'total number of renames'))
2012 entries.append(('parenttime', 'time for one parent'))
2017 entries.append(('parenttime', 'time for one parent'))
2013 entries.append(('totaltime', 'time for both parents'))
2018 entries.append(('totaltime', 'time for both parents'))
2014 _displaystats(ui, opts, entries, alldata)
2019 _displaystats(ui, opts, entries, alldata)
2015
2020
2016
2021
2017 @command(
2022 @command(
2018 b'perfhelper-pathcopies',
2023 b'perfhelper-pathcopies',
2019 formatteropts
2024 formatteropts
2020 + [
2025 + [
2021 (b'r', b'revs', [], b'restrict search to these revisions'),
2026 (b'r', b'revs', [], b'restrict search to these revisions'),
2022 (b'', b'timing', False, b'provides extra data (costly)'),
2027 (b'', b'timing', False, b'provides extra data (costly)'),
2023 (b'', b'stats', False, b'provides statistic about the measured data'),
2028 (b'', b'stats', False, b'provides statistic about the measured data'),
2024 ],
2029 ],
2025 )
2030 )
2026 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2031 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2027 """find statistic about potential parameters for the `perftracecopies`
2032 """find statistic about potential parameters for the `perftracecopies`
2028
2033
2029 This command find source-destination pair relevant for copytracing testing.
2034 This command find source-destination pair relevant for copytracing testing.
2030 It report value for some of the parameters that impact copy tracing time.
2035 It report value for some of the parameters that impact copy tracing time.
2031
2036
2032 If `--timing` is set, rename detection is run and the associated timing
2037 If `--timing` is set, rename detection is run and the associated timing
2033 will be reported. The extra details comes at the cost of a slower command
2038 will be reported. The extra details comes at the cost of a slower command
2034 execution.
2039 execution.
2035
2040
2036 Since the rename detection is only run once, other factors might easily
2041 Since the rename detection is only run once, other factors might easily
2037 affect the precision of the timing. However it should give a good
2042 affect the precision of the timing. However it should give a good
2038 approximation of which revision pairs are very costly.
2043 approximation of which revision pairs are very costly.
2039 """
2044 """
2040 opts = _byteskwargs(opts)
2045 opts = _byteskwargs(opts)
2041 fm = ui.formatter(b'perf', opts)
2046 fm = ui.formatter(b'perf', opts)
2042 dotiming = opts[b'timing']
2047 dotiming = opts[b'timing']
2043 dostats = opts[b'stats']
2048 dostats = opts[b'stats']
2044
2049
2045 if dotiming:
2050 if dotiming:
2046 header = '%12s %12s %12s %12s %12s %12s\n'
2051 header = '%12s %12s %12s %12s %12s %12s\n'
2047 output = (
2052 output = (
2048 "%(source)12s %(destination)12s "
2053 "%(source)12s %(destination)12s "
2049 "%(nbrevs)12d %(nbmissingfiles)12d "
2054 "%(nbrevs)12d %(nbmissingfiles)12d "
2050 "%(nbrenamedfiles)12d %(time)18.5f\n"
2055 "%(nbrenamedfiles)12d %(time)18.5f\n"
2051 )
2056 )
2052 header_names = (
2057 header_names = (
2053 "source",
2058 "source",
2054 "destination",
2059 "destination",
2055 "nb-revs",
2060 "nb-revs",
2056 "nb-files",
2061 "nb-files",
2057 "nb-renames",
2062 "nb-renames",
2058 "time",
2063 "time",
2059 )
2064 )
2060 fm.plain(header % header_names)
2065 fm.plain(header % header_names)
2061 else:
2066 else:
2062 header = '%12s %12s %12s %12s\n'
2067 header = '%12s %12s %12s %12s\n'
2063 output = (
2068 output = (
2064 "%(source)12s %(destination)12s "
2069 "%(source)12s %(destination)12s "
2065 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2070 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2066 )
2071 )
2067 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2072 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2068
2073
2069 if not revs:
2074 if not revs:
2070 revs = ['all()']
2075 revs = ['all()']
2071 revs = scmutil.revrange(repo, revs)
2076 revs = scmutil.revrange(repo, revs)
2072
2077
2073 if dostats:
2078 if dostats:
2074 alldata = {
2079 alldata = {
2075 'nbrevs': [],
2080 'nbrevs': [],
2076 'nbmissingfiles': [],
2081 'nbmissingfiles': [],
2077 }
2082 }
2078 if dotiming:
2083 if dotiming:
2079 alldata['nbrenames'] = []
2084 alldata['nbrenames'] = []
2080 alldata['time'] = []
2085 alldata['time'] = []
2081
2086
2082 roi = repo.revs('merge() and %ld', revs)
2087 roi = repo.revs('merge() and %ld', revs)
2083 for r in roi:
2088 for r in roi:
2084 ctx = repo[r]
2089 ctx = repo[r]
2085 p1 = ctx.p1().rev()
2090 p1 = ctx.p1().rev()
2086 p2 = ctx.p2().rev()
2091 p2 = ctx.p2().rev()
2087 bases = repo.changelog._commonancestorsheads(p1, p2)
2092 bases = repo.changelog._commonancestorsheads(p1, p2)
2088 for p in (p1, p2):
2093 for p in (p1, p2):
2089 for b in bases:
2094 for b in bases:
2090 base = repo[b]
2095 base = repo[b]
2091 parent = repo[p]
2096 parent = repo[p]
2092 missing = copies._computeforwardmissing(base, parent)
2097 missing = copies._computeforwardmissing(base, parent)
2093 if not missing:
2098 if not missing:
2094 continue
2099 continue
2095 data = {
2100 data = {
2096 b'source': base.hex(),
2101 b'source': base.hex(),
2097 b'destination': parent.hex(),
2102 b'destination': parent.hex(),
2098 b'nbrevs': len(repo.revs('%d::%d', b, p)),
2103 b'nbrevs': len(repo.revs('%d::%d', b, p)),
2099 b'nbmissingfiles': len(missing),
2104 b'nbmissingfiles': len(missing),
2100 }
2105 }
2101 if dostats:
2106 if dostats:
2102 alldata['nbrevs'].append(
2107 alldata['nbrevs'].append(
2103 (data['nbrevs'], base.hex(), parent.hex(),)
2108 (data['nbrevs'], base.hex(), parent.hex(),)
2104 )
2109 )
2105 alldata['nbmissingfiles'].append(
2110 alldata['nbmissingfiles'].append(
2106 (data['nbmissingfiles'], base.hex(), parent.hex(),)
2111 (data['nbmissingfiles'], base.hex(), parent.hex(),)
2107 )
2112 )
2108 if dotiming:
2113 if dotiming:
2109 begin = util.timer()
2114 begin = util.timer()
2110 renames = copies.pathcopies(base, parent)
2115 renames = copies.pathcopies(base, parent)
2111 end = util.timer()
2116 end = util.timer()
2112 # not very stable timing since we did only one run
2117 # not very stable timing since we did only one run
2113 data['time'] = end - begin
2118 data['time'] = end - begin
2114 data['nbrenamedfiles'] = len(renames)
2119 data['nbrenamedfiles'] = len(renames)
2115 if dostats:
2120 if dostats:
2116 alldata['time'].append(
2121 alldata['time'].append(
2117 (data['time'], base.hex(), parent.hex(),)
2122 (data['time'], base.hex(), parent.hex(),)
2118 )
2123 )
2119 alldata['nbrenames'].append(
2124 alldata['nbrenames'].append(
2120 (data['nbrenamedfiles'], base.hex(), parent.hex(),)
2125 (data['nbrenamedfiles'], base.hex(), parent.hex(),)
2121 )
2126 )
2122 fm.startitem()
2127 fm.startitem()
2123 fm.data(**data)
2128 fm.data(**data)
2124 out = data.copy()
2129 out = data.copy()
2125 out['source'] = fm.hexfunc(base.node())
2130 out['source'] = fm.hexfunc(base.node())
2126 out['destination'] = fm.hexfunc(parent.node())
2131 out['destination'] = fm.hexfunc(parent.node())
2127 fm.plain(output % out)
2132 fm.plain(output % out)
2128
2133
2129 fm.end()
2134 fm.end()
2130 if dostats:
2135 if dostats:
2131 # use a second formatter because the data are quite different, not sure
2136 # use a second formatter because the data are quite different, not sure
2132 # how it flies with the templater.
2137 # how it flies with the templater.
2133 fm = ui.formatter(b'perf', opts)
2138 fm = ui.formatter(b'perf', opts)
2134 entries = [
2139 entries = [
2135 ('nbrevs', 'number of revision covered'),
2140 ('nbrevs', 'number of revision covered'),
2136 ('nbmissingfiles', 'number of missing files at head'),
2141 ('nbmissingfiles', 'number of missing files at head'),
2137 ]
2142 ]
2138 if dotiming:
2143 if dotiming:
2139 entries.append(('nbrenames', 'renamed files'))
2144 entries.append(('nbrenames', 'renamed files'))
2140 entries.append(('time', 'time'))
2145 entries.append(('time', 'time'))
2141 _displaystats(ui, opts, entries, alldata)
2146 _displaystats(ui, opts, entries, alldata)
2142
2147
2143
2148
2144 @command(b'perfcca', formatteropts)
2149 @command(b'perfcca', formatteropts)
2145 def perfcca(ui, repo, **opts):
2150 def perfcca(ui, repo, **opts):
2146 opts = _byteskwargs(opts)
2151 opts = _byteskwargs(opts)
2147 timer, fm = gettimer(ui, opts)
2152 timer, fm = gettimer(ui, opts)
2148 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2153 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2149 fm.end()
2154 fm.end()
2150
2155
2151
2156
2152 @command(b'perffncacheload', formatteropts)
2157 @command(b'perffncacheload', formatteropts)
2153 def perffncacheload(ui, repo, **opts):
2158 def perffncacheload(ui, repo, **opts):
2154 opts = _byteskwargs(opts)
2159 opts = _byteskwargs(opts)
2155 timer, fm = gettimer(ui, opts)
2160 timer, fm = gettimer(ui, opts)
2156 s = repo.store
2161 s = repo.store
2157
2162
2158 def d():
2163 def d():
2159 s.fncache._load()
2164 s.fncache._load()
2160
2165
2161 timer(d)
2166 timer(d)
2162 fm.end()
2167 fm.end()
2163
2168
2164
2169
2165 @command(b'perffncachewrite', formatteropts)
2170 @command(b'perffncachewrite', formatteropts)
2166 def perffncachewrite(ui, repo, **opts):
2171 def perffncachewrite(ui, repo, **opts):
2167 opts = _byteskwargs(opts)
2172 opts = _byteskwargs(opts)
2168 timer, fm = gettimer(ui, opts)
2173 timer, fm = gettimer(ui, opts)
2169 s = repo.store
2174 s = repo.store
2170 lock = repo.lock()
2175 lock = repo.lock()
2171 s.fncache._load()
2176 s.fncache._load()
2172 tr = repo.transaction(b'perffncachewrite')
2177 tr = repo.transaction(b'perffncachewrite')
2173 tr.addbackup(b'fncache')
2178 tr.addbackup(b'fncache')
2174
2179
2175 def d():
2180 def d():
2176 s.fncache._dirty = True
2181 s.fncache._dirty = True
2177 s.fncache.write(tr)
2182 s.fncache.write(tr)
2178
2183
2179 timer(d)
2184 timer(d)
2180 tr.close()
2185 tr.close()
2181 lock.release()
2186 lock.release()
2182 fm.end()
2187 fm.end()
2183
2188
2184
2189
2185 @command(b'perffncacheencode', formatteropts)
2190 @command(b'perffncacheencode', formatteropts)
2186 def perffncacheencode(ui, repo, **opts):
2191 def perffncacheencode(ui, repo, **opts):
2187 opts = _byteskwargs(opts)
2192 opts = _byteskwargs(opts)
2188 timer, fm = gettimer(ui, opts)
2193 timer, fm = gettimer(ui, opts)
2189 s = repo.store
2194 s = repo.store
2190 s.fncache._load()
2195 s.fncache._load()
2191
2196
2192 def d():
2197 def d():
2193 for p in s.fncache.entries:
2198 for p in s.fncache.entries:
2194 s.encode(p)
2199 s.encode(p)
2195
2200
2196 timer(d)
2201 timer(d)
2197 fm.end()
2202 fm.end()
2198
2203
2199
2204
2200 def _bdiffworker(q, blocks, xdiff, ready, done):
2205 def _bdiffworker(q, blocks, xdiff, ready, done):
2201 while not done.is_set():
2206 while not done.is_set():
2202 pair = q.get()
2207 pair = q.get()
2203 while pair is not None:
2208 while pair is not None:
2204 if xdiff:
2209 if xdiff:
2205 mdiff.bdiff.xdiffblocks(*pair)
2210 mdiff.bdiff.xdiffblocks(*pair)
2206 elif blocks:
2211 elif blocks:
2207 mdiff.bdiff.blocks(*pair)
2212 mdiff.bdiff.blocks(*pair)
2208 else:
2213 else:
2209 mdiff.textdiff(*pair)
2214 mdiff.textdiff(*pair)
2210 q.task_done()
2215 q.task_done()
2211 pair = q.get()
2216 pair = q.get()
2212 q.task_done() # for the None one
2217 q.task_done() # for the None one
2213 with ready:
2218 with ready:
2214 ready.wait()
2219 ready.wait()
2215
2220
2216
2221
2217 def _manifestrevision(repo, mnode):
2222 def _manifestrevision(repo, mnode):
2218 ml = repo.manifestlog
2223 ml = repo.manifestlog
2219
2224
2220 if util.safehasattr(ml, b'getstorage'):
2225 if util.safehasattr(ml, b'getstorage'):
2221 store = ml.getstorage(b'')
2226 store = ml.getstorage(b'')
2222 else:
2227 else:
2223 store = ml._revlog
2228 store = ml._revlog
2224
2229
2225 return store.revision(mnode)
2230 return store.revision(mnode)
2226
2231
2227
2232
2228 @command(
2233 @command(
2229 b'perfbdiff',
2234 b'perfbdiff',
2230 revlogopts
2235 revlogopts
2231 + formatteropts
2236 + formatteropts
2232 + [
2237 + [
2233 (
2238 (
2234 b'',
2239 b'',
2235 b'count',
2240 b'count',
2236 1,
2241 1,
2237 b'number of revisions to test (when using --startrev)',
2242 b'number of revisions to test (when using --startrev)',
2238 ),
2243 ),
2239 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2244 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2240 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2245 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2241 (b'', b'blocks', False, b'test computing diffs into blocks'),
2246 (b'', b'blocks', False, b'test computing diffs into blocks'),
2242 (b'', b'xdiff', False, b'use xdiff algorithm'),
2247 (b'', b'xdiff', False, b'use xdiff algorithm'),
2243 ],
2248 ],
2244 b'-c|-m|FILE REV',
2249 b'-c|-m|FILE REV',
2245 )
2250 )
2246 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2251 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2247 """benchmark a bdiff between revisions
2252 """benchmark a bdiff between revisions
2248
2253
2249 By default, benchmark a bdiff between its delta parent and itself.
2254 By default, benchmark a bdiff between its delta parent and itself.
2250
2255
2251 With ``--count``, benchmark bdiffs between delta parents and self for N
2256 With ``--count``, benchmark bdiffs between delta parents and self for N
2252 revisions starting at the specified revision.
2257 revisions starting at the specified revision.
2253
2258
2254 With ``--alldata``, assume the requested revision is a changeset and
2259 With ``--alldata``, assume the requested revision is a changeset and
2255 measure bdiffs for all changes related to that changeset (manifest
2260 measure bdiffs for all changes related to that changeset (manifest
2256 and filelogs).
2261 and filelogs).
2257 """
2262 """
2258 opts = _byteskwargs(opts)
2263 opts = _byteskwargs(opts)
2259
2264
2260 if opts[b'xdiff'] and not opts[b'blocks']:
2265 if opts[b'xdiff'] and not opts[b'blocks']:
2261 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2266 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2262
2267
2263 if opts[b'alldata']:
2268 if opts[b'alldata']:
2264 opts[b'changelog'] = True
2269 opts[b'changelog'] = True
2265
2270
2266 if opts.get(b'changelog') or opts.get(b'manifest'):
2271 if opts.get(b'changelog') or opts.get(b'manifest'):
2267 file_, rev = None, file_
2272 file_, rev = None, file_
2268 elif rev is None:
2273 elif rev is None:
2269 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2274 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2270
2275
2271 blocks = opts[b'blocks']
2276 blocks = opts[b'blocks']
2272 xdiff = opts[b'xdiff']
2277 xdiff = opts[b'xdiff']
2273 textpairs = []
2278 textpairs = []
2274
2279
2275 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2280 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2276
2281
2277 startrev = r.rev(r.lookup(rev))
2282 startrev = r.rev(r.lookup(rev))
2278 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2283 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2279 if opts[b'alldata']:
2284 if opts[b'alldata']:
2280 # Load revisions associated with changeset.
2285 # Load revisions associated with changeset.
2281 ctx = repo[rev]
2286 ctx = repo[rev]
2282 mtext = _manifestrevision(repo, ctx.manifestnode())
2287 mtext = _manifestrevision(repo, ctx.manifestnode())
2283 for pctx in ctx.parents():
2288 for pctx in ctx.parents():
2284 pman = _manifestrevision(repo, pctx.manifestnode())
2289 pman = _manifestrevision(repo, pctx.manifestnode())
2285 textpairs.append((pman, mtext))
2290 textpairs.append((pman, mtext))
2286
2291
2287 # Load filelog revisions by iterating manifest delta.
2292 # Load filelog revisions by iterating manifest delta.
2288 man = ctx.manifest()
2293 man = ctx.manifest()
2289 pman = ctx.p1().manifest()
2294 pman = ctx.p1().manifest()
2290 for filename, change in pman.diff(man).items():
2295 for filename, change in pman.diff(man).items():
2291 fctx = repo.file(filename)
2296 fctx = repo.file(filename)
2292 f1 = fctx.revision(change[0][0] or -1)
2297 f1 = fctx.revision(change[0][0] or -1)
2293 f2 = fctx.revision(change[1][0] or -1)
2298 f2 = fctx.revision(change[1][0] or -1)
2294 textpairs.append((f1, f2))
2299 textpairs.append((f1, f2))
2295 else:
2300 else:
2296 dp = r.deltaparent(rev)
2301 dp = r.deltaparent(rev)
2297 textpairs.append((r.revision(dp), r.revision(rev)))
2302 textpairs.append((r.revision(dp), r.revision(rev)))
2298
2303
2299 withthreads = threads > 0
2304 withthreads = threads > 0
2300 if not withthreads:
2305 if not withthreads:
2301
2306
2302 def d():
2307 def d():
2303 for pair in textpairs:
2308 for pair in textpairs:
2304 if xdiff:
2309 if xdiff:
2305 mdiff.bdiff.xdiffblocks(*pair)
2310 mdiff.bdiff.xdiffblocks(*pair)
2306 elif blocks:
2311 elif blocks:
2307 mdiff.bdiff.blocks(*pair)
2312 mdiff.bdiff.blocks(*pair)
2308 else:
2313 else:
2309 mdiff.textdiff(*pair)
2314 mdiff.textdiff(*pair)
2310
2315
2311 else:
2316 else:
2312 q = queue()
2317 q = queue()
2313 for i in _xrange(threads):
2318 for i in _xrange(threads):
2314 q.put(None)
2319 q.put(None)
2315 ready = threading.Condition()
2320 ready = threading.Condition()
2316 done = threading.Event()
2321 done = threading.Event()
2317 for i in _xrange(threads):
2322 for i in _xrange(threads):
2318 threading.Thread(
2323 threading.Thread(
2319 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2324 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2320 ).start()
2325 ).start()
2321 q.join()
2326 q.join()
2322
2327
2323 def d():
2328 def d():
2324 for pair in textpairs:
2329 for pair in textpairs:
2325 q.put(pair)
2330 q.put(pair)
2326 for i in _xrange(threads):
2331 for i in _xrange(threads):
2327 q.put(None)
2332 q.put(None)
2328 with ready:
2333 with ready:
2329 ready.notify_all()
2334 ready.notify_all()
2330 q.join()
2335 q.join()
2331
2336
2332 timer, fm = gettimer(ui, opts)
2337 timer, fm = gettimer(ui, opts)
2333 timer(d)
2338 timer(d)
2334 fm.end()
2339 fm.end()
2335
2340
2336 if withthreads:
2341 if withthreads:
2337 done.set()
2342 done.set()
2338 for i in _xrange(threads):
2343 for i in _xrange(threads):
2339 q.put(None)
2344 q.put(None)
2340 with ready:
2345 with ready:
2341 ready.notify_all()
2346 ready.notify_all()
2342
2347
2343
2348
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies reading from the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # pairs of (old text, new text) to diff during the timed run
    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    stop = min(startrev + count, len(r) - 1)
    for rev in range(startrev, stop):
        if not opts[b'alldata']:
            # plain mode: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))
            continue

        # Load revisions associated with changeset.
        ctx = repo[rev]
        mtext = _manifestrevision(repo, ctx.manifestnode())
        for pctx in ctx.parents():
            pairs.append((_manifestrevision(repo, pctx.manifestnode()), mtext))

        # Load filelog revisions by iterating manifest delta.
        curman = ctx.manifest()
        parman = ctx.p1().manifest()
        for filename, change in parman.diff(curman).items():
            flog = repo.file(filename)
            oldtext = flog.revision(change[0][0] or -1)
            newtext = flog.revision(change[1][0] or -1)
            pairs.append((oldtext, newtext))

    def d():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2422
2427
2423
2428
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter diff flags mapped to their diffopts keyword names
    flagmap = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark the bare diff plus each whitespace-handling variant
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = {flagmap[c]: b'1' for c in diffopt}

        def d():
            # buffer output so printing does not pollute the timing
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        if diffopt:
            title = b'diffopts: %s' % (b'-' + diffopt)
        else:
            title = b'diffopts: %s' % b'none'
        timer(d, title=title)
    fm.end()
2447
2452
2448
2453
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes of the index encode the format: low 16 bits are
    # the version, the high bits carry feature flags (e.g. inline data)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog so lookups are
    # benchmarked at several depths
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time instantiating a revlog object
        revlog.revlog(opener, indexfile)

    def read():
        # time raw reading of the index file
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # time parsing the raw index data into an index object
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        # time parsing plus a single index-entry lookup
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        # time retrieving many index entries; count > 1 repeats the scan
        # to expose caching effects
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # time a single node -> entry lookup through the nodemap
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            # missing nodes are an expected benchmark case
            pass

    def resolvenodes(nodes, count=1):
        # time resolving many nodes; count > 1 repeats to expose caching
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, title) pairs; each is timed independently below
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2569
2574
2570
2575
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start counts back from the end of the revlog
    if startrev < 0:
        startrev += rllen

    def d():
        # drop caches so every pass does the full amount of work
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            # walk from tip-most down past the start revision
            first, last = last - 1, first - 1
            step = -step

        for current in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(current))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2619
2624
2620
2625
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: error message previously read 'invalide run count'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    # perform `count` full passes; each pass times every revision write
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # turn per-pass [(rev, t), ...] lists into one [(rev, [t1..tcount])]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the median row previously used `resultcount * 70 // 100`,
        # reporting the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2762
2767
2763
2768
2764 class _faketr(object):
2769 class _faketr(object):
2765 def add(s, x, y, z=None):
2770 def add(s, x, y, z=None):
2766 return None
2771 return None
2767
2772
2768
2773
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions [startrev, stoprev] of `orig` to a temporary copy
    of the revlog, timing each `addrawrevision` call.

    `source` selects how each revision is fed to the destination (full
    text or a cached delta; see `perfrevlogwrite`). `runidx`, when set,
    is only used to label the progress bar. Returns a list of
    (rev, timing) pairs in revision order.
    """
    timings = []
    # addrawrevision requires a transaction, but nothing needs to be
    # recorded here, so a no-op stand-in is enough
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop destination caches so each addition is measured cold
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed; seed
            # preparation above stays outside the measurement
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            # r[0] is the timing recorded by the timeone context manager
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2818
2823
2819
2824
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) fed to `addrawrevision` for `rev`.

    Depending on `source`, the revision data is supplied either as a
    full text or as a cached delta against one of its parents (or its
    storage-level delta parent).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # keep whichever parent yields the shorter delta; ties go to p1
        base = p1
        basediff = orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(basediff) > len(otherdiff):
                base = p2
                basediff = otherdiff
        cachedelta = (orig.rev(base), basediff)
    elif source == b'storage':
        # reuse the delta already stored in the source revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2860
2865
2861
2866
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog ``orig`` truncated to ``truncaterev``.

    The original revlog's index and data files are copied into a temporary
    directory, then truncated so that revisions ``truncaterev`` and later are
    missing.  The resulting revlog can be used to re-add those revisions and
    benchmark the write path without touching the real repository.  The
    temporary directory is removed when the context exits.
    """
    from mercurial import vfs as vfsmod

    # inline revlogs interleave index and data in one file; the byte-offset
    # truncation below only works on the split (index + data) layout
    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward the compression bound if this Mercurial version supports it
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # each index entry is fixed-size (orig._io.size bytes), so the index
        # is cut at entry `truncaterev`; the data file is cut at that
        # revision's start offset
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        # reuse the original opener's options so the copy behaves the same
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # best-effort cleanup (ignore_errors=True)
        shutil.rmtree(tmpdir, True)
2917
2913
2918
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        # explicit list: validate each requested engine name
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit list: probe every available engine that actually
        # implements revlog compression
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a file handle on whichever file holds the revision data
        # (inline revlogs store data inside the index file)
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # read each revision's raw segment, one call per revision
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread(), but reusing a single file handle across reads
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # read all revisions' segments in a single call
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        # batch read with a reused file handle
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read AND decompress each revision's chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # one-element list so dochunkbatch() can publish its result to
    # docompress() below
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks gathered by dochunkbatch() with `compressor`
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # NOTE: the compression benches must run after 'chunk batch' above,
    # which populates chunks[0]
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3041
3046
3042
3047
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m there is no FILE argument, so the first positional is REV
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each read segment in `data` into the per-revision compressed
        # chunks described by the corresponding delta-chain slice in `chain`
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                # zero-copy view into the segment
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # Each do* closure below benchmarks one phase; when --cache is not set,
    # caches are cleared first so every run measures cold-state cost.

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # exhaust the generator; slicing itself is what is being measured
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        # the full end-to-end revision retrieval
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase benchmark consumes, mirroring the
    # real revision-retrieval pipeline
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    # first chunk is the chain-base fulltext; the rest are deltas
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3184
3189
3185
3190
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on the revset execution. Volatile caches hold
    filtered and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop caches so each run pays the cache (re)build cost
            repo.invalidatevolatilesets()
        # iterate the result fully: revsets are lazy, so exhausting the
        # iterator is what forces the actual evaluation being measured
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3217
3222
3218
3223
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, setname):
        # Build a timed callable that recomputes the volatile set `setname`
        # from a cold state on every run, using `compute(repo, setname)`.
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)

        return run

    # obsolescence-related sets first, then repoview filter sets; an
    # explicit `names` argument restricts both lists
    obsnames = sorted(obsolete.cachefuncs)
    filternames = sorted(repoview.filtertable)
    if names:
        obsnames = [n for n in obsnames if n in names]
        filternames = [n for n in filternames if n in names]

    for name in obsnames:
        timer(makebench(obsolete.getrevs, name), title=name)

    for name in filternames:
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
3264
3269
3265
3270
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measures a full rebuild
                view._branchcaches.clear()
            else:
                # drop only this filter's entry: measures the incremental
                # update on top of the (pre-warmed) subset caches
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    # (topological order: each filter is appended only once the subset it
    # builds upon has been removed from the pending set)
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            # for/else: no progress possible means a dependency cycle
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable branchmap disk I/O (reads and writes) so only the in-memory
    # computation is timed; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3355
3360
3356
3361
3357 @command(
3362 @command(
3358 b'perfbranchmapupdate',
3363 b'perfbranchmapupdate',
3359 [
3364 [
3360 (b'', b'base', [], b'subset of revision to start from'),
3365 (b'', b'base', [], b'subset of revision to start from'),
3361 (b'', b'target', [], b'subset of revision to end with'),
3366 (b'', b'target', [], b'subset of revision to end with'),
3362 (b'', b'clear-caches', False, b'clear cache between each runs'),
3367 (b'', b'clear-caches', False, b'clear cache between each runs'),
3363 ]
3368 ]
3364 + formatteropts,
3369 + formatteropts,
3365 )
3370 )
3366 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3371 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3367 """benchmark branchmap update from for <base> revs to <target> revs
3372 """benchmark branchmap update from for <base> revs to <target> revs
3368
3373
3369 If `--clear-caches` is passed, the following items will be reset before
3374 If `--clear-caches` is passed, the following items will be reset before
3370 each update:
3375 each update:
3371 * the changelog instance and associated indexes
3376 * the changelog instance and associated indexes
3372 * the rev-branch-cache instance
3377 * the rev-branch-cache instance
3373
3378
3374 Examples:
3379 Examples:
3375
3380
3376 # update for the one last revision
3381 # update for the one last revision
3377 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3382 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3378
3383
3379 $ update for change coming with a new branch
3384 $ update for change coming with a new branch
3380 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3385 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3381 """
3386 """
3382 from mercurial import branchmap
3387 from mercurial import branchmap
3383 from mercurial import repoview
3388 from mercurial import repoview
3384
3389
3385 opts = _byteskwargs(opts)
3390 opts = _byteskwargs(opts)
3386 timer, fm = gettimer(ui, opts)
3391 timer, fm = gettimer(ui, opts)
3387 clearcaches = opts[b'clear_caches']
3392 clearcaches = opts[b'clear_caches']
3388 unfi = repo.unfiltered()
3393 unfi = repo.unfiltered()
3389 x = [None] # used to pass data between closure
3394 x = [None] # used to pass data between closure
3390
3395
3391 # we use a `list` here to avoid possible side effect from smartset
3396 # we use a `list` here to avoid possible side effect from smartset
3392 baserevs = list(scmutil.revrange(repo, base))
3397 baserevs = list(scmutil.revrange(repo, base))
3393 targetrevs = list(scmutil.revrange(repo, target))
3398 targetrevs = list(scmutil.revrange(repo, target))
3394 if not baserevs:
3399 if not baserevs:
3395 raise error.Abort(b'no revisions selected for --base')
3400 raise error.Abort(b'no revisions selected for --base')
3396 if not targetrevs:
3401 if not targetrevs:
3397 raise error.Abort(b'no revisions selected for --target')
3402 raise error.Abort(b'no revisions selected for --target')
3398
3403
3399 # make sure the target branchmap also contains the one in the base
3404 # make sure the target branchmap also contains the one in the base
3400 targetrevs = list(set(baserevs) | set(targetrevs))
3405 targetrevs = list(set(baserevs) | set(targetrevs))
3401 targetrevs.sort()
3406 targetrevs.sort()
3402
3407
3403 cl = repo.changelog
3408 cl = repo.changelog
3404 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3409 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3405 allbaserevs.sort()
3410 allbaserevs.sort()
3406 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3411 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3407
3412
3408 newrevs = list(alltargetrevs.difference(allbaserevs))
3413 newrevs = list(alltargetrevs.difference(allbaserevs))
3409 newrevs.sort()
3414 newrevs.sort()
3410
3415
3411 allrevs = frozenset(unfi.changelog.revs())
3416 allrevs = frozenset(unfi.changelog.revs())
3412 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3417 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3413 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3418 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3414
3419
3415 def basefilter(repo, visibilityexceptions=None):
3420 def basefilter(repo, visibilityexceptions=None):
3416 return basefilterrevs
3421 return basefilterrevs
3417
3422
3418 def targetfilter(repo, visibilityexceptions=None):
3423 def targetfilter(repo, visibilityexceptions=None):
3419 return targetfilterrevs
3424 return targetfilterrevs
3420
3425
3421 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3426 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3422 ui.status(msg % (len(allbaserevs), len(newrevs)))
3427 ui.status(msg % (len(allbaserevs), len(newrevs)))
3423 if targetfilterrevs:
3428 if targetfilterrevs:
3424 msg = b'(%d revisions still filtered)\n'
3429 msg = b'(%d revisions still filtered)\n'
3425 ui.status(msg % len(targetfilterrevs))
3430 ui.status(msg % len(targetfilterrevs))
3426
3431
3427 try:
3432 try:
3428 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3433 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3429 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3434 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3430
3435
3431 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3436 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3432 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3437 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3433
3438
3434 # try to find an existing branchmap to reuse
3439 # try to find an existing branchmap to reuse
3435 subsettable = getbranchmapsubsettable()
3440 subsettable = getbranchmapsubsettable()
3436 candidatefilter = subsettable.get(None)
3441 candidatefilter = subsettable.get(None)
3437 while candidatefilter is not None:
3442 while candidatefilter is not None:
3438 candidatebm = repo.filtered(candidatefilter).branchmap()
3443 candidatebm = repo.filtered(candidatefilter).branchmap()
3439 if candidatebm.validfor(baserepo):
3444 if candidatebm.validfor(baserepo):
3440 filtered = repoview.filterrevs(repo, candidatefilter)
3445 filtered = repoview.filterrevs(repo, candidatefilter)
3441 missing = [r for r in allbaserevs if r in filtered]
3446 missing = [r for r in allbaserevs if r in filtered]
3442 base = candidatebm.copy()
3447 base = candidatebm.copy()
3443 base.update(baserepo, missing)
3448 base.update(baserepo, missing)
3444 break
3449 break
3445 candidatefilter = subsettable.get(candidatefilter)
3450 candidatefilter = subsettable.get(candidatefilter)
3446 else:
3451 else:
3447 # no suitable subset where found
3452 # no suitable subset where found
3448 base = branchmap.branchcache()
3453 base = branchmap.branchcache()
3449 base.update(baserepo, allbaserevs)
3454 base.update(baserepo, allbaserevs)
3450
3455
3451 def setup():
3456 def setup():
3452 x[0] = base.copy()
3457 x[0] = base.copy()
3453 if clearcaches:
3458 if clearcaches:
3454 unfi._revbranchcache = None
3459 unfi._revbranchcache = None
3455 clearchangelog(repo)
3460 clearchangelog(repo)
3456
3461
3457 def bench():
3462 def bench():
3458 x[0].update(targetrepo, newrevs)
3463 x[0].update(targetrepo, newrevs)
3459
3464
3460 timer(bench, setup=setup)
3465 timer(bench, setup=setup)
3461 fm.end()
3466 fm.end()
3462 finally:
3467 finally:
3463 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3468 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3464 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3469 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3465
3470
3466
3471
3467 @command(
3472 @command(
3468 b'perfbranchmapload',
3473 b'perfbranchmapload',
3469 [
3474 [
3470 (b'f', b'filter', b'', b'Specify repoview filter'),
3475 (b'f', b'filter', b'', b'Specify repoview filter'),
3471 (b'', b'list', False, b'List brachmap filter caches'),
3476 (b'', b'list', False, b'List brachmap filter caches'),
3472 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3477 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3473 ]
3478 ]
3474 + formatteropts,
3479 + formatteropts,
3475 )
3480 )
3476 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3481 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3477 """benchmark reading the branchmap"""
3482 """benchmark reading the branchmap"""
3478 opts = _byteskwargs(opts)
3483 opts = _byteskwargs(opts)
3479 clearrevlogs = opts[b'clear_revlogs']
3484 clearrevlogs = opts[b'clear_revlogs']
3480
3485
3481 if list:
3486 if list:
3482 for name, kind, st in repo.cachevfs.readdir(stat=True):
3487 for name, kind, st in repo.cachevfs.readdir(stat=True):
3483 if name.startswith(b'branch2'):
3488 if name.startswith(b'branch2'):
3484 filtername = name.partition(b'-')[2] or b'unfiltered'
3489 filtername = name.partition(b'-')[2] or b'unfiltered'
3485 ui.status(
3490 ui.status(
3486 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3491 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3487 )
3492 )
3488 return
3493 return
3489 if not filter:
3494 if not filter:
3490 filter = None
3495 filter = None
3491 subsettable = getbranchmapsubsettable()
3496 subsettable = getbranchmapsubsettable()
3492 if filter is None:
3497 if filter is None:
3493 repo = repo.unfiltered()
3498 repo = repo.unfiltered()
3494 else:
3499 else:
3495 repo = repoview.repoview(repo, filter)
3500 repo = repoview.repoview(repo, filter)
3496
3501
3497 repo.branchmap() # make sure we have a relevant, up to date branchmap
3502 repo.branchmap() # make sure we have a relevant, up to date branchmap
3498
3503
3499 try:
3504 try:
3500 fromfile = branchmap.branchcache.fromfile
3505 fromfile = branchmap.branchcache.fromfile
3501 except AttributeError:
3506 except AttributeError:
3502 # older versions
3507 # older versions
3503 fromfile = branchmap.read
3508 fromfile = branchmap.read
3504
3509
3505 currentfilter = filter
3510 currentfilter = filter
3506 # try once without timer, the filter may not be cached
3511 # try once without timer, the filter may not be cached
3507 while fromfile(repo) is None:
3512 while fromfile(repo) is None:
3508 currentfilter = subsettable.get(currentfilter)
3513 currentfilter = subsettable.get(currentfilter)
3509 if currentfilter is None:
3514 if currentfilter is None:
3510 raise error.Abort(
3515 raise error.Abort(
3511 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3516 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3512 )
3517 )
3513 repo = repo.filtered(currentfilter)
3518 repo = repo.filtered(currentfilter)
3514 timer, fm = gettimer(ui, opts)
3519 timer, fm = gettimer(ui, opts)
3515
3520
3516 def setup():
3521 def setup():
3517 if clearrevlogs:
3522 if clearrevlogs:
3518 clearchangelog(repo)
3523 clearchangelog(repo)
3519
3524
3520 def bench():
3525 def bench():
3521 fromfile(repo)
3526 fromfile(repo)
3522
3527
3523 timer(bench, setup=setup)
3528 timer(bench, setup=setup)
3524 fm.end()
3529 fm.end()
3525
3530
3526
3531
3527 @command(b'perfloadmarkers')
3532 @command(b'perfloadmarkers')
3528 def perfloadmarkers(ui, repo):
3533 def perfloadmarkers(ui, repo):
3529 """benchmark the time to parse the on-disk markers for a repo
3534 """benchmark the time to parse the on-disk markers for a repo
3530
3535
3531 Result is the number of markers in the repo."""
3536 Result is the number of markers in the repo."""
3532 timer, fm = gettimer(ui)
3537 timer, fm = gettimer(ui)
3533 svfs = getsvfs(repo)
3538 svfs = getsvfs(repo)
3534 timer(lambda: len(obsolete.obsstore(svfs)))
3539 timer(lambda: len(obsolete.obsstore(svfs)))
3535 fm.end()
3540 fm.end()
3536
3541
3537
3542
3538 @command(
3543 @command(
3539 b'perflrucachedict',
3544 b'perflrucachedict',
3540 formatteropts
3545 formatteropts
3541 + [
3546 + [
3542 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3547 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3543 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3548 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3544 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3549 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3545 (b'', b'size', 4, b'size of cache'),
3550 (b'', b'size', 4, b'size of cache'),
3546 (b'', b'gets', 10000, b'number of key lookups'),
3551 (b'', b'gets', 10000, b'number of key lookups'),
3547 (b'', b'sets', 10000, b'number of key sets'),
3552 (b'', b'sets', 10000, b'number of key sets'),
3548 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3553 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3549 (
3554 (
3550 b'',
3555 b'',
3551 b'mixedgetfreq',
3556 b'mixedgetfreq',
3552 50,
3557 50,
3553 b'frequency of get vs set ops in mixed mode',
3558 b'frequency of get vs set ops in mixed mode',
3554 ),
3559 ),
3555 ],
3560 ],
3556 norepo=True,
3561 norepo=True,
3557 )
3562 )
3558 def perflrucache(
3563 def perflrucache(
3559 ui,
3564 ui,
3560 mincost=0,
3565 mincost=0,
3561 maxcost=100,
3566 maxcost=100,
3562 costlimit=0,
3567 costlimit=0,
3563 size=4,
3568 size=4,
3564 gets=10000,
3569 gets=10000,
3565 sets=10000,
3570 sets=10000,
3566 mixed=10000,
3571 mixed=10000,
3567 mixedgetfreq=50,
3572 mixedgetfreq=50,
3568 **opts
3573 **opts
3569 ):
3574 ):
3570 opts = _byteskwargs(opts)
3575 opts = _byteskwargs(opts)
3571
3576
3572 def doinit():
3577 def doinit():
3573 for i in _xrange(10000):
3578 for i in _xrange(10000):
3574 util.lrucachedict(size)
3579 util.lrucachedict(size)
3575
3580
3576 costrange = list(range(mincost, maxcost + 1))
3581 costrange = list(range(mincost, maxcost + 1))
3577
3582
3578 values = []
3583 values = []
3579 for i in _xrange(size):
3584 for i in _xrange(size):
3580 values.append(random.randint(0, _maxint))
3585 values.append(random.randint(0, _maxint))
3581
3586
3582 # Get mode fills the cache and tests raw lookup performance with no
3587 # Get mode fills the cache and tests raw lookup performance with no
3583 # eviction.
3588 # eviction.
3584 getseq = []
3589 getseq = []
3585 for i in _xrange(gets):
3590 for i in _xrange(gets):
3586 getseq.append(random.choice(values))
3591 getseq.append(random.choice(values))
3587
3592
3588 def dogets():
3593 def dogets():
3589 d = util.lrucachedict(size)
3594 d = util.lrucachedict(size)
3590 for v in values:
3595 for v in values:
3591 d[v] = v
3596 d[v] = v
3592 for key in getseq:
3597 for key in getseq:
3593 value = d[key]
3598 value = d[key]
3594 value # silence pyflakes warning
3599 value # silence pyflakes warning
3595
3600
3596 def dogetscost():
3601 def dogetscost():
3597 d = util.lrucachedict(size, maxcost=costlimit)
3602 d = util.lrucachedict(size, maxcost=costlimit)
3598 for i, v in enumerate(values):
3603 for i, v in enumerate(values):
3599 d.insert(v, v, cost=costs[i])
3604 d.insert(v, v, cost=costs[i])
3600 for key in getseq:
3605 for key in getseq:
3601 try:
3606 try:
3602 value = d[key]
3607 value = d[key]
3603 value # silence pyflakes warning
3608 value # silence pyflakes warning
3604 except KeyError:
3609 except KeyError:
3605 pass
3610 pass
3606
3611
3607 # Set mode tests insertion speed with cache eviction.
3612 # Set mode tests insertion speed with cache eviction.
3608 setseq = []
3613 setseq = []
3609 costs = []
3614 costs = []
3610 for i in _xrange(sets):
3615 for i in _xrange(sets):
3611 setseq.append(random.randint(0, _maxint))
3616 setseq.append(random.randint(0, _maxint))
3612 costs.append(random.choice(costrange))
3617 costs.append(random.choice(costrange))
3613
3618
3614 def doinserts():
3619 def doinserts():
3615 d = util.lrucachedict(size)
3620 d = util.lrucachedict(size)
3616 for v in setseq:
3621 for v in setseq:
3617 d.insert(v, v)
3622 d.insert(v, v)
3618
3623
3619 def doinsertscost():
3624 def doinsertscost():
3620 d = util.lrucachedict(size, maxcost=costlimit)
3625 d = util.lrucachedict(size, maxcost=costlimit)
3621 for i, v in enumerate(setseq):
3626 for i, v in enumerate(setseq):
3622 d.insert(v, v, cost=costs[i])
3627 d.insert(v, v, cost=costs[i])
3623
3628
3624 def dosets():
3629 def dosets():
3625 d = util.lrucachedict(size)
3630 d = util.lrucachedict(size)
3626 for v in setseq:
3631 for v in setseq:
3627 d[v] = v
3632 d[v] = v
3628
3633
3629 # Mixed mode randomly performs gets and sets with eviction.
3634 # Mixed mode randomly performs gets and sets with eviction.
3630 mixedops = []
3635 mixedops = []
3631 for i in _xrange(mixed):
3636 for i in _xrange(mixed):
3632 r = random.randint(0, 100)
3637 r = random.randint(0, 100)
3633 if r < mixedgetfreq:
3638 if r < mixedgetfreq:
3634 op = 0
3639 op = 0
3635 else:
3640 else:
3636 op = 1
3641 op = 1
3637
3642
3638 mixedops.append(
3643 mixedops.append(
3639 (op, random.randint(0, size * 2), random.choice(costrange))
3644 (op, random.randint(0, size * 2), random.choice(costrange))
3640 )
3645 )
3641
3646
3642 def domixed():
3647 def domixed():
3643 d = util.lrucachedict(size)
3648 d = util.lrucachedict(size)
3644
3649
3645 for op, v, cost in mixedops:
3650 for op, v, cost in mixedops:
3646 if op == 0:
3651 if op == 0:
3647 try:
3652 try:
3648 d[v]
3653 d[v]
3649 except KeyError:
3654 except KeyError:
3650 pass
3655 pass
3651 else:
3656 else:
3652 d[v] = v
3657 d[v] = v
3653
3658
3654 def domixedcost():
3659 def domixedcost():
3655 d = util.lrucachedict(size, maxcost=costlimit)
3660 d = util.lrucachedict(size, maxcost=costlimit)
3656
3661
3657 for op, v, cost in mixedops:
3662 for op, v, cost in mixedops:
3658 if op == 0:
3663 if op == 0:
3659 try:
3664 try:
3660 d[v]
3665 d[v]
3661 except KeyError:
3666 except KeyError:
3662 pass
3667 pass
3663 else:
3668 else:
3664 d.insert(v, v, cost=cost)
3669 d.insert(v, v, cost=cost)
3665
3670
3666 benches = [
3671 benches = [
3667 (doinit, b'init'),
3672 (doinit, b'init'),
3668 ]
3673 ]
3669
3674
3670 if costlimit:
3675 if costlimit:
3671 benches.extend(
3676 benches.extend(
3672 [
3677 [
3673 (dogetscost, b'gets w/ cost limit'),
3678 (dogetscost, b'gets w/ cost limit'),
3674 (doinsertscost, b'inserts w/ cost limit'),
3679 (doinsertscost, b'inserts w/ cost limit'),
3675 (domixedcost, b'mixed w/ cost limit'),
3680 (domixedcost, b'mixed w/ cost limit'),
3676 ]
3681 ]
3677 )
3682 )
3678 else:
3683 else:
3679 benches.extend(
3684 benches.extend(
3680 [
3685 [
3681 (dogets, b'gets'),
3686 (dogets, b'gets'),
3682 (doinserts, b'inserts'),
3687 (doinserts, b'inserts'),
3683 (dosets, b'sets'),
3688 (dosets, b'sets'),
3684 (domixed, b'mixed'),
3689 (domixed, b'mixed'),
3685 ]
3690 ]
3686 )
3691 )
3687
3692
3688 for fn, title in benches:
3693 for fn, title in benches:
3689 timer, fm = gettimer(ui, opts)
3694 timer, fm = gettimer(ui, opts)
3690 timer(fn, title=title)
3695 timer(fn, title=title)
3691 fm.end()
3696 fm.end()
3692
3697
3693
3698
3694 @command(b'perfwrite', formatteropts)
3699 @command(b'perfwrite', formatteropts)
3695 def perfwrite(ui, repo, **opts):
3700 def perfwrite(ui, repo, **opts):
3696 """microbenchmark ui.write
3701 """microbenchmark ui.write
3697 """
3702 """
3698 opts = _byteskwargs(opts)
3703 opts = _byteskwargs(opts)
3699
3704
3700 timer, fm = gettimer(ui, opts)
3705 timer, fm = gettimer(ui, opts)
3701
3706
3702 def write():
3707 def write():
3703 for i in range(100000):
3708 for i in range(100000):
3704 ui.writenoi18n(b'Testing write performance\n')
3709 ui.writenoi18n(b'Testing write performance\n')
3705
3710
3706 timer(write)
3711 timer(write)
3707 fm.end()
3712 fm.end()
3708
3713
3709
3714
3710 def uisetup(ui):
3715 def uisetup(ui):
3711 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3716 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3712 commands, b'debugrevlogopts'
3717 commands, b'debugrevlogopts'
3713 ):
3718 ):
3714 # for "historical portability":
3719 # for "historical portability":
3715 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3720 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3716 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3721 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3717 # openrevlog() should cause failure, because it has been
3722 # openrevlog() should cause failure, because it has been
3718 # available since 3.5 (or 49c583ca48c4).
3723 # available since 3.5 (or 49c583ca48c4).
3719 def openrevlog(orig, repo, cmd, file_, opts):
3724 def openrevlog(orig, repo, cmd, file_, opts):
3720 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3725 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3721 raise error.Abort(
3726 raise error.Abort(
3722 b"This version doesn't support --dir option",
3727 b"This version doesn't support --dir option",
3723 hint=b"use 3.5 or later",
3728 hint=b"use 3.5 or later",
3724 )
3729 )
3725 return orig(repo, cmd, file_, opts)
3730 return orig(repo, cmd, file_, opts)
3726
3731
3727 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3732 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3728
3733
3729
3734
3730 @command(
3735 @command(
3731 b'perfprogress',
3736 b'perfprogress',
3732 formatteropts
3737 formatteropts
3733 + [
3738 + [
3734 (b'', b'topic', b'topic', b'topic for progress messages'),
3739 (b'', b'topic', b'topic', b'topic for progress messages'),
3735 (b'c', b'total', 1000000, b'total value we are progressing to'),
3740 (b'c', b'total', 1000000, b'total value we are progressing to'),
3736 ],
3741 ],
3737 norepo=True,
3742 norepo=True,
3738 )
3743 )
3739 def perfprogress(ui, topic=None, total=None, **opts):
3744 def perfprogress(ui, topic=None, total=None, **opts):
3740 """printing of progress bars"""
3745 """printing of progress bars"""
3741 opts = _byteskwargs(opts)
3746 opts = _byteskwargs(opts)
3742
3747
3743 timer, fm = gettimer(ui, opts)
3748 timer, fm = gettimer(ui, opts)
3744
3749
3745 def doprogress():
3750 def doprogress():
3746 with ui.makeprogress(topic, total=total) as progress:
3751 with ui.makeprogress(topic, total=total) as progress:
3747 for i in _xrange(total):
3752 for i in _xrange(total):
3748 progress.increment()
3753 progress.increment()
3749
3754
3750 timer(doprogress)
3755 timer(doprogress)
3751 fm.end()
3756 fm.end()
@@ -1,396 +1,396
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perfaddremove
81 perfaddremove
82 (no help text available)
82 (no help text available)
83 perfancestors
83 perfancestors
84 (no help text available)
84 (no help text available)
85 perfancestorset
85 perfancestorset
86 (no help text available)
86 (no help text available)
87 perfannotate (no help text available)
87 perfannotate (no help text available)
88 perfbdiff benchmark a bdiff between revisions
88 perfbdiff benchmark a bdiff between revisions
89 perfbookmarks
89 perfbookmarks
90 benchmark parsing bookmarks from disk to memory
90 benchmark parsing bookmarks from disk to memory
91 perfbranchmap
91 perfbranchmap
92 benchmark the update of a branchmap
92 benchmark the update of a branchmap
93 perfbranchmapload
93 perfbranchmapload
94 benchmark reading the branchmap
94 benchmark reading the branchmap
95 perfbranchmapupdate
95 perfbranchmapupdate
96 benchmark branchmap update from for <base> revs to <target>
96 benchmark branchmap update from for <base> revs to <target>
97 revs
97 revs
98 perfbundleread
98 perfbundleread
99 Benchmark reading of bundle files.
99 Benchmark reading of bundle files.
100 perfcca (no help text available)
100 perfcca (no help text available)
101 perfchangegroupchangelog
101 perfchangegroupchangelog
102 Benchmark producing a changelog group for a changegroup.
102 Benchmark producing a changelog group for a changegroup.
103 perfchangeset
103 perfchangeset
104 (no help text available)
104 (no help text available)
105 perfctxfiles (no help text available)
105 perfctxfiles (no help text available)
106 perfdiffwd Profile diff of working directory changes
106 perfdiffwd Profile diff of working directory changes
107 perfdirfoldmap
107 perfdirfoldmap
108 (no help text available)
108 (no help text available)
109 perfdirs (no help text available)
109 perfdirs (no help text available)
110 perfdirstate (no help text available)
110 perfdirstate benchmap the time necessary to load a dirstate from scratch
111 perfdirstatedirs
111 perfdirstatedirs
112 (no help text available)
112 (no help text available)
113 perfdirstatefoldmap
113 perfdirstatefoldmap
114 (no help text available)
114 (no help text available)
115 perfdirstatewrite
115 perfdirstatewrite
116 (no help text available)
116 (no help text available)
117 perfdiscovery
117 perfdiscovery
118 benchmark discovery between local repo and the peer at given
118 benchmark discovery between local repo and the peer at given
119 path
119 path
120 perffncacheencode
120 perffncacheencode
121 (no help text available)
121 (no help text available)
122 perffncacheload
122 perffncacheload
123 (no help text available)
123 (no help text available)
124 perffncachewrite
124 perffncachewrite
125 (no help text available)
125 (no help text available)
126 perfheads benchmark the computation of a changelog heads
126 perfheads benchmark the computation of a changelog heads
127 perfhelper-mergecopies
127 perfhelper-mergecopies
128 find statistics about potential parameters for
128 find statistics about potential parameters for
129 'perfmergecopies'
129 'perfmergecopies'
130 perfhelper-pathcopies
130 perfhelper-pathcopies
131 find statistic about potential parameters for the
131 find statistic about potential parameters for the
132 'perftracecopies'
132 'perftracecopies'
133 perfignore benchmark operation related to computing ignore
133 perfignore benchmark operation related to computing ignore
134 perfindex benchmark index creation time followed by a lookup
134 perfindex benchmark index creation time followed by a lookup
135 perflinelogedits
135 perflinelogedits
136 (no help text available)
136 (no help text available)
137 perfloadmarkers
137 perfloadmarkers
138 benchmark the time to parse the on-disk markers for a repo
138 benchmark the time to parse the on-disk markers for a repo
139 perflog (no help text available)
139 perflog (no help text available)
140 perflookup (no help text available)
140 perflookup (no help text available)
141 perflrucachedict
141 perflrucachedict
142 (no help text available)
142 (no help text available)
143 perfmanifest benchmark the time to read a manifest from disk and return a
143 perfmanifest benchmark the time to read a manifest from disk and return a
144 usable
144 usable
145 perfmergecalculate
145 perfmergecalculate
146 (no help text available)
146 (no help text available)
147 perfmergecopies
147 perfmergecopies
148 measure runtime of 'copies.mergecopies'
148 measure runtime of 'copies.mergecopies'
149 perfmoonwalk benchmark walking the changelog backwards
149 perfmoonwalk benchmark walking the changelog backwards
150 perfnodelookup
150 perfnodelookup
151 (no help text available)
151 (no help text available)
152 perfnodemap benchmark the time necessary to look up revision from a cold
152 perfnodemap benchmark the time necessary to look up revision from a cold
153 nodemap
153 nodemap
154 perfparents benchmark the time necessary to fetch one changeset's parents.
154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 perfpathcopies
155 perfpathcopies
156 benchmark the copy tracing logic
156 benchmark the copy tracing logic
157 perfphases benchmark phasesets computation
157 perfphases benchmark phasesets computation
158 perfphasesremote
158 perfphasesremote
159 benchmark time needed to analyse phases of the remote server
159 benchmark time needed to analyse phases of the remote server
160 perfprogress printing of progress bars
160 perfprogress printing of progress bars
161 perfrawfiles (no help text available)
161 perfrawfiles (no help text available)
162 perfrevlogchunks
162 perfrevlogchunks
163 Benchmark operations on revlog chunks.
163 Benchmark operations on revlog chunks.
164 perfrevlogindex
164 perfrevlogindex
165 Benchmark operations against a revlog index.
165 Benchmark operations against a revlog index.
166 perfrevlogrevision
166 perfrevlogrevision
167 Benchmark obtaining a revlog revision.
167 Benchmark obtaining a revlog revision.
168 perfrevlogrevisions
168 perfrevlogrevisions
169 Benchmark reading a series of revisions from a revlog.
169 Benchmark reading a series of revisions from a revlog.
170 perfrevlogwrite
170 perfrevlogwrite
171 Benchmark writing a series of revisions to a revlog.
171 Benchmark writing a series of revisions to a revlog.
172 perfrevrange (no help text available)
172 perfrevrange (no help text available)
173 perfrevset benchmark the execution time of a revset
173 perfrevset benchmark the execution time of a revset
174 perfstartup (no help text available)
174 perfstartup (no help text available)
175 perfstatus benchmark the performance of a single status call
175 perfstatus benchmark the performance of a single status call
176 perftags (no help text available)
176 perftags (no help text available)
177 perftemplating
177 perftemplating
178 test the rendering time of a given template
178 test the rendering time of a given template
179 perfunidiff benchmark a unified diff between revisions
179 perfunidiff benchmark a unified diff between revisions
180 perfvolatilesets
180 perfvolatilesets
181 benchmark the computation of various volatile set
181 benchmark the computation of various volatile set
182 perfwalk (no help text available)
182 perfwalk (no help text available)
183 perfwrite microbenchmark ui.write
183 perfwrite microbenchmark ui.write
184
184
185 (use 'hg help -v perf' to show built-in aliases and global options)
185 (use 'hg help -v perf' to show built-in aliases and global options)
186 $ hg perfaddremove
186 $ hg perfaddremove
187 $ hg perfancestors
187 $ hg perfancestors
188 $ hg perfancestorset 2
188 $ hg perfancestorset 2
189 $ hg perfannotate a
189 $ hg perfannotate a
190 $ hg perfbdiff -c 1
190 $ hg perfbdiff -c 1
191 $ hg perfbdiff --alldata 1
191 $ hg perfbdiff --alldata 1
192 $ hg perfunidiff -c 1
192 $ hg perfunidiff -c 1
193 $ hg perfunidiff --alldata 1
193 $ hg perfunidiff --alldata 1
194 $ hg perfbookmarks
194 $ hg perfbookmarks
195 $ hg perfbranchmap
195 $ hg perfbranchmap
196 $ hg perfbranchmapload
196 $ hg perfbranchmapload
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 benchmark of branchmap with 3 revisions with 1 new ones
198 benchmark of branchmap with 3 revisions with 1 new ones
199 $ hg perfcca
199 $ hg perfcca
200 $ hg perfchangegroupchangelog
200 $ hg perfchangegroupchangelog
201 $ hg perfchangegroupchangelog --cgversion 01
201 $ hg perfchangegroupchangelog --cgversion 01
202 $ hg perfchangeset 2
202 $ hg perfchangeset 2
203 $ hg perfctxfiles 2
203 $ hg perfctxfiles 2
204 $ hg perfdiffwd
204 $ hg perfdiffwd
205 $ hg perfdirfoldmap
205 $ hg perfdirfoldmap
206 $ hg perfdirs
206 $ hg perfdirs
207 $ hg perfdirstate
207 $ hg perfdirstate
208 $ hg perfdirstatedirs
208 $ hg perfdirstatedirs
209 $ hg perfdirstatefoldmap
209 $ hg perfdirstatefoldmap
210 $ hg perfdirstatewrite
210 $ hg perfdirstatewrite
211 #if repofncache
211 #if repofncache
212 $ hg perffncacheencode
212 $ hg perffncacheencode
213 $ hg perffncacheload
213 $ hg perffncacheload
214 $ hg debugrebuildfncache
214 $ hg debugrebuildfncache
215 fncache already up to date
215 fncache already up to date
216 $ hg perffncachewrite
216 $ hg perffncachewrite
217 $ hg debugrebuildfncache
217 $ hg debugrebuildfncache
218 fncache already up to date
218 fncache already up to date
219 #endif
219 #endif
220 $ hg perfheads
220 $ hg perfheads
221 $ hg perfignore
221 $ hg perfignore
222 $ hg perfindex
222 $ hg perfindex
223 $ hg perflinelogedits -n 1
223 $ hg perflinelogedits -n 1
224 $ hg perfloadmarkers
224 $ hg perfloadmarkers
225 $ hg perflog
225 $ hg perflog
226 $ hg perflookup 2
226 $ hg perflookup 2
227 $ hg perflrucache
227 $ hg perflrucache
228 $ hg perfmanifest 2
228 $ hg perfmanifest 2
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
230 $ hg perfmanifest -m 44fe2c8352bb
230 $ hg perfmanifest -m 44fe2c8352bb
231 abort: manifest revision must be integer or full node
231 abort: manifest revision must be integer or full node
232 [255]
232 [255]
233 $ hg perfmergecalculate -r 3
233 $ hg perfmergecalculate -r 3
234 $ hg perfmoonwalk
234 $ hg perfmoonwalk
235 $ hg perfnodelookup 2
235 $ hg perfnodelookup 2
236 $ hg perfpathcopies 1 2
236 $ hg perfpathcopies 1 2
237 $ hg perfprogress --total 1000
237 $ hg perfprogress --total 1000
238 $ hg perfrawfiles 2
238 $ hg perfrawfiles 2
239 $ hg perfrevlogindex -c
239 $ hg perfrevlogindex -c
240 #if reporevlogstore
240 #if reporevlogstore
241 $ hg perfrevlogrevisions .hg/store/data/a.i
241 $ hg perfrevlogrevisions .hg/store/data/a.i
242 #endif
242 #endif
243 $ hg perfrevlogrevision -m 0
243 $ hg perfrevlogrevision -m 0
244 $ hg perfrevlogchunks -c
244 $ hg perfrevlogchunks -c
245 $ hg perfrevrange
245 $ hg perfrevrange
246 $ hg perfrevset 'all()'
246 $ hg perfrevset 'all()'
247 $ hg perfstartup
247 $ hg perfstartup
248 $ hg perfstatus
248 $ hg perfstatus
249 $ hg perftags
249 $ hg perftags
250 $ hg perftemplating
250 $ hg perftemplating
251 $ hg perfvolatilesets
251 $ hg perfvolatilesets
252 $ hg perfwalk
252 $ hg perfwalk
253 $ hg perfparents
253 $ hg perfparents
254 $ hg perfdiscovery -q .
254 $ hg perfdiscovery -q .
255
255
256 Test run control
256 Test run control
257 ----------------
257 ----------------
258
258
259 Simple single entry
259 Simple single entry
260
260
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
262 ! wall * comb * user * sys * (best of 15) (glob)
262 ! wall * comb * user * sys * (best of 15) (glob)
263
263
264 Multiple entries
264 Multiple entries
265
265
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
267 ! wall * comb * user * sys * (best of 5) (glob)
267 ! wall * comb * user * sys * (best of 5) (glob)
268
268
269 error case are ignored
269 error case are ignored
270
270
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
272 malformatted run limit entry, missing "-": 500
272 malformatted run limit entry, missing "-": 500
273 ! wall * comb * user * sys * (best of 5) (glob)
273 ! wall * comb * user * sys * (best of 5) (glob)
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
277 ! wall * comb * user * sys * (best of 5) (glob)
277 ! wall * comb * user * sys * (best of 5) (glob)
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
280 ! wall * comb * user * sys * (best of 5) (glob)
280 ! wall * comb * user * sys * (best of 5) (glob)
281
281
282 test actual output
282 test actual output
283 ------------------
283 ------------------
284
284
285 normal output:
285 normal output:
286
286
287 $ hg perfheads --config perf.stub=no
287 $ hg perfheads --config perf.stub=no
288 ! wall * comb * user * sys * (best of *) (glob)
288 ! wall * comb * user * sys * (best of *) (glob)
289
289
290 detailed output:
290 detailed output:
291
291
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
293 ! wall * comb * user * sys * (best of *) (glob)
293 ! wall * comb * user * sys * (best of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
297
297
298 test json output
298 test json output
299 ----------------
299 ----------------
300
300
301 normal output:
301 normal output:
302
302
303 $ hg perfheads --template json --config perf.stub=no
303 $ hg perfheads --template json --config perf.stub=no
304 [
304 [
305 {
305 {
306 "comb": *, (glob)
306 "comb": *, (glob)
307 "count": *, (glob)
307 "count": *, (glob)
308 "sys": *, (glob)
308 "sys": *, (glob)
309 "user": *, (glob)
309 "user": *, (glob)
310 "wall": * (glob)
310 "wall": * (glob)
311 }
311 }
312 ]
312 ]
313
313
314 detailed output:
314 detailed output:
315
315
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
317 [
317 [
318 {
318 {
319 "avg.comb": *, (glob)
319 "avg.comb": *, (glob)
320 "avg.count": *, (glob)
320 "avg.count": *, (glob)
321 "avg.sys": *, (glob)
321 "avg.sys": *, (glob)
322 "avg.user": *, (glob)
322 "avg.user": *, (glob)
323 "avg.wall": *, (glob)
323 "avg.wall": *, (glob)
324 "comb": *, (glob)
324 "comb": *, (glob)
325 "count": *, (glob)
325 "count": *, (glob)
326 "max.comb": *, (glob)
326 "max.comb": *, (glob)
327 "max.count": *, (glob)
327 "max.count": *, (glob)
328 "max.sys": *, (glob)
328 "max.sys": *, (glob)
329 "max.user": *, (glob)
329 "max.user": *, (glob)
330 "max.wall": *, (glob)
330 "max.wall": *, (glob)
331 "median.comb": *, (glob)
331 "median.comb": *, (glob)
332 "median.count": *, (glob)
332 "median.count": *, (glob)
333 "median.sys": *, (glob)
333 "median.sys": *, (glob)
334 "median.user": *, (glob)
334 "median.user": *, (glob)
335 "median.wall": *, (glob)
335 "median.wall": *, (glob)
336 "sys": *, (glob)
336 "sys": *, (glob)
337 "user": *, (glob)
337 "user": *, (glob)
338 "wall": * (glob)
338 "wall": * (glob)
339 }
339 }
340 ]
340 ]
341
341
342 Test pre-run feature
342 Test pre-run feature
343 --------------------
343 --------------------
344
344
345 (perf discovery has some spurious output)
345 (perf discovery has some spurious output)
346
346
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
348 ! wall * comb * user * sys * (best of 1) (glob)
348 ! wall * comb * user * sys * (best of 1) (glob)
349 searching for changes
349 searching for changes
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
351 ! wall * comb * user * sys * (best of 1) (glob)
351 ! wall * comb * user * sys * (best of 1) (glob)
352 searching for changes
352 searching for changes
353 searching for changes
353 searching for changes
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
355 ! wall * comb * user * sys * (best of 1) (glob)
355 ! wall * comb * user * sys * (best of 1) (glob)
356 searching for changes
356 searching for changes
357 searching for changes
357 searching for changes
358 searching for changes
358 searching for changes
359 searching for changes
359 searching for changes
360
360
361 test profile-benchmark option
361 test profile-benchmark option
362 ------------------------------
362 ------------------------------
363
363
364 Function to check that statprof ran
364 Function to check that statprof ran
365 $ statprofran () {
365 $ statprofran () {
366 > egrep 'Sample count:|No samples recorded' > /dev/null
366 > egrep 'Sample count:|No samples recorded' > /dev/null
367 > }
367 > }
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
369
369
370 Check perf.py for historical portability
370 Check perf.py for historical portability
371 ----------------------------------------
371 ----------------------------------------
372
372
373 $ cd "$TESTDIR/.."
373 $ cd "$TESTDIR/.."
374
374
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
378 contrib/perf.py:\d+: (re)
378 contrib/perf.py:\d+: (re)
379 > from mercurial import (
379 > from mercurial import (
380 import newer module separately in try clause for early Mercurial
380 import newer module separately in try clause for early Mercurial
381 contrib/perf.py:\d+: (re)
381 contrib/perf.py:\d+: (re)
382 > from mercurial import (
382 > from mercurial import (
383 import newer module separately in try clause for early Mercurial
383 import newer module separately in try clause for early Mercurial
384 contrib/perf.py:\d+: (re)
384 contrib/perf.py:\d+: (re)
385 > origindexpath = orig.opener.join(orig.indexfile)
385 > origindexpath = orig.opener.join(orig.indexfile)
386 use getvfs()/getsvfs() for early Mercurial
386 use getvfs()/getsvfs() for early Mercurial
387 contrib/perf.py:\d+: (re)
387 contrib/perf.py:\d+: (re)
388 > origdatapath = orig.opener.join(orig.datafile)
388 > origdatapath = orig.opener.join(orig.datafile)
389 use getvfs()/getsvfs() for early Mercurial
389 use getvfs()/getsvfs() for early Mercurial
390 contrib/perf.py:\d+: (re)
390 contrib/perf.py:\d+: (re)
391 > vfs = vfsmod.vfs(tmpdir)
391 > vfs = vfsmod.vfs(tmpdir)
392 use getvfs()/getsvfs() for early Mercurial
392 use getvfs()/getsvfs() for early Mercurial
393 contrib/perf.py:\d+: (re)
393 contrib/perf.py:\d+: (re)
394 > vfs.options = getattr(orig.opener, 'options', None)
394 > vfs.options = getattr(orig.opener, 'options', None)
395 use getvfs()/getsvfs() for early Mercurial
395 use getvfs()/getsvfs() for early Mercurial
396 [1]
396 [1]
General Comments 0
You need to be logged in to leave comments. Login now