##// END OF EJS Templates
perf: document `perfstatus`
marmoute -
r43390:97f80dd2 default
parent child Browse files
Show More
@@ -1,3744 +1,3751 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 from __future__ import absolute_import
57 from __future__ import absolute_import
58 import contextlib
58 import contextlib
59 import functools
59 import functools
60 import gc
60 import gc
61 import os
61 import os
62 import random
62 import random
63 import shutil
63 import shutil
64 import struct
64 import struct
65 import sys
65 import sys
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 from mercurial import (
69 from mercurial import (
70 changegroup,
70 changegroup,
71 cmdutil,
71 cmdutil,
72 commands,
72 commands,
73 copies,
73 copies,
74 error,
74 error,
75 extensions,
75 extensions,
76 hg,
76 hg,
77 mdiff,
77 mdiff,
78 merge,
78 merge,
79 revlog,
79 revlog,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122
122
def identity(a):
    # No-op transform; used as the fallback when a pycompat conversion
    # helper (byteskwargs, fsencode, ...) is unavailable.
    return a
125
125
126
126
try:
    from mercurial import pycompat

    # bind pycompat helpers under local names; each needs a minimum hg
    # version, noted per line
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # fallback for hg versions predating the helpers above; py2-only
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
150
150
# pick a Queue class portable across hg/python versions
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        # last resort: py2 stdlib module
        import Queue as queue
160
160
# locate maketemplater wherever this hg version keeps it; None if absent
try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
170
170
171 # for "historical portability":
171 # for "historical portability":
172 # define util.safehasattr forcibly, because util.safehasattr has been
172 # define util.safehasattr forcibly, because util.safehasattr has been
173 # available since 1.9.3 (or 94b200a11cf7)
173 # available since 1.9.3 (or 94b200a11cf7)
174 _undefined = object()
174 _undefined = object()
175
175
176
176
def safehasattr(thing, attr):
    """Portable hasattr(): True when *thing* has attribute *attr*.

    *attr* is a bytes name (converted with _sysstr); the _undefined
    sentinel distinguishes a missing attribute from one set to None.
    """
    sentinel = _undefined
    found = getattr(thing, _sysstr(attr), sentinel)
    return found is not sentinel
179
179
180
180
# install the fallback so later code can rely on util.safehasattr everywhere
setattr(util, 'safehasattr', safehasattr)
182
182
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # preferred: monotonic, high-resolution (Python >= 3.3)
    util.timer = time.perf_counter
elif os.name == 'nt':
    # NOTE: this previously compared os.name (a native str) to b'nt',
    # which is always False on Python 3; use the str literal so the
    # Windows branch can actually be taken on both py2 and py3.
    util.timer = time.clock
else:
    util.timer = time.time
192
192
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# command table filled in by the @command decorator below
cmdtable = {}
222
222
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the list of aliases encoded in a b'name|alias|...' spec."""
    pieces = cmd.split(b"|")
    return pieces
228
228
229
229
# choose/define the @command decorator depending on what this hg provides
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending aliases to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261
261
262
262
# declare the perf.* config options so devel-warnings stay quiet on
# modern hg; silently skipped on versions without the registrar
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (the experimental= keyword raised TypeError; re-register without it)
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335
335
336
336
def getlen(ui):
    """Return a length function; a constant-1 stub when perf.stub is set."""
    stubbed = ui.configbool(b"perf", b"stub", False)
    if not stubbed:
        return len

    def _one(x):
        return 1

    return _one
341
341
342
342
class noop(object):
    """Context manager that does nothing; stands in for a real profiler."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        # returning a falsy value never suppresses exceptions
        return None


# shared do-nothing context instance used when profiling is disabled
NOOPCTX = noop()
354
354
355
355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # falsy on purpose: callers use bool(fm) to detect a real
                # (templated) formatter
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # entries look like b'<seconds>-<runcount>'; malformed entries are
    # warned about and skipped
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479
479
480
480
def stub_timer(fm, func, setup=None, title=None):
    """Single-shot replacement for _timer: run *setup* (if given), then
    *func* once, producing no timing output.

    *fm* and *title* are accepted only for signature compatibility.
    """
    has_setup = setup is not None
    if has_setup:
        setup()
    func()
485
485
486
486
@contextlib.contextmanager
def timeone():
    """Measure one run of the managed block.

    Yields a list; on exit a single (wallclock, user-cpu, system-cpu)
    tuple is appended to it.
    """
    measurement = []
    before_os = os.times()
    before_wall = util.timer()
    yield measurement
    after_wall = util.timer()
    after_os = os.times()
    wall = after_wall - before_wall
    user = after_os[0] - before_os[0]
    system = after_os[1] - before_os[1]
    measurement.append((wall, user, system))
497
497
498
498
# list of stop condition (elapsed time, minimal run count)
# Each entry reads: once a benchmark has run for at least <seconds> AND
# completed at least <count> iterations, stop.  Checked in order by _timer.
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504
504
505
505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func* and report the timings through formatter *fm*.

    *setup* (if given) runs before every invocation of *func* and is not
    timed.  *prerun* warm-up runs happen before measurement starts.
    *profiler* is a context manager wrapping only the FIRST measured run;
    it is replaced with NOOPCTX afterwards.  *limits* is a sequence of
    (elapsed-seconds, min-run-count) stop conditions.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # the loop above always runs at least once, so `r` is bound here
    formatone(fm, results, title=title, result=r, displayall=displayall)
545
545
546
546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples and is sorted in
    place.  The best run is always shown; with *displayall*, max/avg/
    median rows follow.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, entry):
        # every row except 'best' gets a "<role>." field-name prefix
        prefix = b'' if role == b'best' else b'%s.' % role
        wall, user, system = entry[0], entry[1], entry[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user + system)
        fm.write(prefix + b'user', b' user %f', user)
        fm.write(prefix + b'sys', b' sys %f', system)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        averages = tuple(sum(column) / count for column in zip(*timings))
        emit(b'avg', averages)
        emit(b'median', timings[len(timings) // 2])
580
580
581
581
582 # utilities for historical portability
582 # utilities for historical portability
583
583
584
584
def getint(ui, section, name, default):
    """Read config option section.name as an int, or *default* if unset.

    Raises error.ConfigError when a value is present but not an integer.
    (ui.configint has only been available since 1.9 / fa2b596db182,
    hence this local helper.)
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597
597
598
598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # snapshot the current value so restore() can undo any set()
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
635
635
636
636
637 # utilities to examine each internal API changes
637 # utilities to examine each internal API changes
638
638
639
639
def getbranchmapsubsettable():
    """Return the `subsettable` mapping from whichever module defines it.

    For "historical portability", subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658
658
659
659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        # older Mercurial exposed the same vfs as 'sopener'
        return getattr(repo, 'sopener')
670
670
671
671
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        # older Mercurial exposed the same vfs as 'opener'
        return getattr(repo, 'opener')
682
682
683
683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713
713
714
714
715 # utilities to clear cache
715 # utilities to clear cache
716
716
717
717
def clearfilecache(obj, attrname):
    """Drop the filecache entry ``attrname`` from ``obj``.

    Operates on the unfiltered repo when ``obj`` supports it, removes the
    cached instance attribute, and forgets the corresponding ``_filecache``
    bookkeeping entry so the property is recomputed on next access.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
725
725
726
726
def clearchangelog(repo):
    """Invalidate the (possibly filtered) repo's cached changelog."""
    if repo is not repo.unfiltered():
        # filtered repos keep their own changelog cache/key; reset both
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
732
732
733
733
734 # perf commands
734 # perf commands
735
735
736
736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for files matching PATTERNS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
750
750
751
751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
759
759
760
760
@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
774
781
775
782
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True  # silence per-file output during the benchmark
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # scmutil.addremove grew a `uipathfn` argument in newer Mercurial;
        # probe the signature so this works on both sides of that change.
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
793
800
794
801
def clearcaches(cl):
    """Clear a changelog/revlog's lookup caches.

    Behaves somewhat consistently across internal API changes: prefer the
    modern clearcaches() method, and fall back to resetting the legacy
    `_nodecache`/`_nodepos` attributes on older Mercurial.
    """
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
804
811
805
812
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # setup: drop caches so each run recomputes from scratch
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
821
828
822
829
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # setup: optionally drop revlog caches, always drop the tags cache
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
845
852
846
853
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
859
866
860
867
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor-membership tests for revisions in REVSET"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # lazy membership test is the operation being measured
            rev in s

    timer(d)
    fm.end()
875
882
876
883
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # repos[1] is filled lazily in setup so peer creation is excluded
    # from the measured time
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
893
900
894
901
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # setup: optionally drop revlog caches, always drop parsed bookmarks
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
917
924
918
925
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark factories: each returns a zero-argument callable that
    # re-opens the bundle file on every run so I/O is measured each time

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the applicable benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1043
1050
1044
1051
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1080
1087
1081
1088
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark building the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate by forcing a parse before timing
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the computed dir map so the next run rebuilds it
        del dirstate._map._dirs

    timer(d)
    fm.end()
1095
1102
1096
1103
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark parsing the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate once before timing
    b"a" in repo.dirstate

    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate

    timer(d)
    fm.end()
1109
1116
1110
1117
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark the dirstate hasdir() directory map computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate once before timing
    b"a" in repo.dirstate

    def d():
        repo.dirstate.hasdir(b"a")
        # drop the computed dir map so the next run rebuilds it
        del repo.dirstate._map._dirs

    timer(d)
    fm.end()
1123
1130
1124
1131
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate's case-folding file map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate once before timing
    b'a' in dirstate

    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the computed map so the next run rebuilds it
        del dirstate._map.filefoldmap

    timer(d)
    fm.end()
1138
1145
1139
1146
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate's case-folding directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate once before timing
    b'a' in dirstate

    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both derived maps so the next run rebuilds them
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    timer(d)
    fm.end()
1154
1161
1155
1162
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate once before timing
    b"a" in ds

    def d():
        # force a write even though nothing changed
        ds._dirty = True
        ds.write(repo.currenttransaction())

    timer(d)
    fm.end()
1169
1176
1170
1177
def _getmergerevs(repo, opts):
    """parse command arguments to return revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1192
1199
1193
1200
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the runtime of `merge.calculateupdates`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1225
1232
1226
1233
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1249
1256
1250
1257
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def trace_copies():
        copies.pathcopies(ctx1, ctx2)

    timer(trace_copies)
    fm.end()
1264
1271
1265
1272
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # with --full, also pay the cost of re-reading the phase data
            # from disk by dropping the filecache entry
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1288
1295
1289
1296
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # `remotephases` is a plain dict returned by listkeys, so use items():
    # the former iteritems() call was Python 2 only and breaks under
    # Python 3
    for nhex, phase in remotephases.items():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1346
1353
1347
1354
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # argument is a changeset: derive the manifest node from it
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hexadecimal manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial exposes the revlog directly
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def load_manifest():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(load_manifest)
    fm.end()
1391
1398
1392
1399
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def read_changeset():
        repo.changelog.read(n)

    timer(read_changeset)
    fm.end()
1405
1412
1406
1413
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setup_load():
        # drop both the dirstate content and the cached ignore matcher so
        # each run measures a cold load
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def run_load():
        dirstate._ignore

    timer(run_load, setup=setup_load, title=b"load")
    fm.end()
1423
1430
1424
1431
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs turned all option keys into bytes, so the `rev`
        # option must be accessed with a bytes key (the former str key
        # raised KeyError on Python 3); the Abort message is bytes too,
        # matching the rest of the file
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1486
1493
1487
1494
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # _byteskwargs turned all option keys into bytes; the former str key
    # 'clear_caches' raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        # bytes message, consistent with the other Abort calls in this file
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1554
1561
1555
1562
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def spawn_hg():
        # re-run the current hg executable with an empty HGRCPATH so the
        # measurement is not skewed by user configuration
        if os.name != r'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(spawn_hg)
    fm.end()
1572
1579
1573
1580
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object
    layers from the repository object. The first N revisions will be used
    for this benchmark. N is controlled by the ``perf.parentscount`` config
    option (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def fetch_parents():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(fetch_parents)
    fm.end()
1599
1606
1600
1607
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the list of files touched by a changeset"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def list_files():
        len(repo[x].files())

    timer(list_files)
    fm.end()
1612
1619
1613
1620
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from the changelog entry"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def read_files():
        # index 3 of the parsed changelog entry is the file list
        len(cl.read(x)[3])

    timer(read_files)
    fm.end()
1626
1633
1627
1634
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1634
1641
1635
1642
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)  # fixed seed: identical edit stream on every run
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def apply_edits():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(apply_edits)
    fm.end()
1673
1680
1674
1681
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revision set specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1682
1689
1683
1690
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node-to-rev lookup on a freshly opened changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def lookup_node():
        cl.rev(n)
        # drop the revlog caches so every run starts cold
        clearcaches(cl)

    timer(lookup_node)
    fm.end()
1700
1707
1701
1708
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run (output is swallowed)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer the output so log rendering does not hit the terminal
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
1719
1726
1720
1727
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1737
1744
1738
1745
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into /dev/null through a pager-less ui copy so terminal I/O
    # does not pollute the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render_all():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render_all)
    fm.end()
1778
1785
1779
1786
def _displaystats(ui, opts, entries, data):
    """display percentile statistics for each gathered data series

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each key
    to a list of tuples whose first item is the measured value.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # nothing collected for this series; indexing below would
            # raise an IndexError
            continue
        # percentile indices must be computed over the number of collected
        # values; the previous code mistakenly used len(data), i.e. the
        # number of series, which collapsed every percentile to ~min
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1825
1832
1826
1833
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the timing/rename columns when --timing is not requested
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant (base, p1, p2) sources
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                # fixed: read the clock *before* computing the delta; the
                # previous code assigned `data['p2.time'] = end - begin`
                # using the stale `end` from the p1 measurement
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2008
2015
2009
2016
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits yield interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # _displaystats builds its own 'perf-stats' formatter; the old
        # code created an extra unused `ui.formatter(b'perf', opts)` here
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2135
2142
2136
2143
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark the construction of a case collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2143
2150
2144
2151
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
2156
2163
2157
2164
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file

    The write happens inside a throw-away transaction with a backup of the
    original fncache, so the on-disk file is left untouched.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')

            def d():
                # mark dirty so every run actually performs a write
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        finally:
            # release aborts the transaction if close() was not reached,
            # so a failing benchmark no longer leaks an open transaction
            tr.release()
    finally:
        # previously the lock leaked if anything above raised
        lock.release()
    fm.end()
2176
2183
2177
2184
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark the store name encoding of all fncache entries"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2191
2198
2192
2199
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker thread: consume text pairs from ``q`` and diff them

    A ``None`` item marks the end of a batch; the worker then sleeps on
    ``ready`` until the driver wakes it for the next batch. Setting
    ``done`` before the wake-up makes the worker exit.
    """
    # the flags never change, so pick the diff flavour once up front
    if xdiff:
        diff = mdiff.bdiff.xdiffblocks
    elif blocks:
        diff = mdiff.bdiff.blocks
    else:
        diff = mdiff.textdiff
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            diff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2208
2215
2209
2216
def _manifestrevision(repo, mnode):
    """return the raw revision text for manifest node ``mnode``"""
    ml = repo.manifestlog
    # newer Mercurial exposes storage via getstorage(); fall back to the
    # private _revlog attribute for older versions
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2219
2226
2220
2227
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # positional argument is a revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so the timed section only
    # measures the diffing itself
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        # single-threaded driver: diff every pair inline; the flags are
        # constant, so resolve the diff flavour once
        if xdiff:
            diff = mdiff.bdiff.xdiffblocks
        elif blocks:
            diff = mdiff.bdiff.blocks
        else:
            diff = mdiff.textdiff

        def d():
            for pair in textpairs:
                diff(*pair)

    else:
        # threaded driver: hand pairs to a pool of _bdiffworker threads;
        # a None per worker marks the end of each batch
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # ask the workers to exit and wake them up one last time
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2335
2342
2336
2343
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies walking the changelog.
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # FILE slot actually holds the revision in this mode.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    pairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                pairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                pairs.append((f1, f2))
        else:
            # Default mode: diff each revision against its delta parent.
            dp = r.deltaparent(rev)
            pairs.append((r.revision(dp), r.revision(rev)))

    def benchmark():
        for old, new in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                old, b'', new, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
2415
2422
2416
2423
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter flag -> diff option keyword argument
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for combo in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {flagnames[flag]: b'1' for flag in combo}

        def run():
            # Suppress output; we only care about the timing.
            ui.pushbuffer()
            commands.diff(ui, repo, **diffkwargs)
            ui.popbuffer()

        label = combo.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(run, title=title)
    fm.end()
2440
2447
2441
2448
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # First 4 bytes: flags in the high 16 bits, format version in the low 16.
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # Probe nodes at fixed fractions of the revlog length.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # Instantiation cost only; nothing is resolved eagerly.
        revlog.revlog(opener, indexfile)

    def read():
        # Raw file read, no index parsing.
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(key):
        index = revlogio.parseindex(data, inline)[0]
        index[key]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for _pass in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for _pass in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2562
2569
2563
2570
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # Negative start revision counts back from the end.
    if startrev < 0:
        startrev = rllen + startrev

    def benchmark():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            # Walk from tip-ward back toward the start revision.
            first, last = last - 1, first - 1
            step = -1 * step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
2612
2619
2613
2620
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count back from the end of the revlog.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: error message previously read 'invalide run count'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] is (rev, [timing from each pass]); passes must agree on revs.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the 50th percentile was previously computed with
        # ``resultcount * 70 // 100`` (copy-paste error reporting the 70th
        # percentile under the "50%" label).
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2755
2762
2756
2763
2757 class _faketr(object):
2764 class _faketr(object):
2758 def add(s, x, y, z=None):
2765 def add(s, x, y, z=None):
2759 return None
2766 return None
2760
2767
2761
2768
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions [startrev, stoprev] of ``orig`` into a scratch revlog.

    Returns a list of (rev, timing) pairs, one entry per revision added.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for seq, rev in enumerate(revs):
            updateprogress(seq)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # Start each addition from a cold cache so timings are
                # comparable across revisions.
                dest.index.clearcaches()
                dest.clearcaches()
            # Only the addrawrevision call itself is timed.
            with timeone() as clock:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, clock[0]))
        updateprogress(total)
        completeprogress()
    return timings
2811
2818
2812
2819
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) for ``addrawrevision`` replaying ``rev``.

    ``source`` selects how the revision content is fed: as a full text, as
    a delta against one of the parents, or as the stored delta.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to p1 when there is no second parent.
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # Start from p1; switch to p2 only if its delta is strictly smaller.
        parent = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(otherdiff) < len(diff):
                parent = p2
                diff = otherdiff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2853
2860
2854
2861
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a scratch copy of revlog ``orig`` truncated before ``truncaterev``.

    The index and data files of ``orig`` are copied into a temporary
    directory, truncated so that revisions >= ``truncaterev`` are absent,
    and a fresh revlog is instantiated on top of the copies.  The temporary
    directory is removed on exit.  Inline revlogs are not supported (they
    interleave index and data in one file, which this copy/truncate logic
    does not handle).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # Forward 'upperboundcomp' to the new revlog only when the running
    # Mercurial version exposes it on revlog objects.
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries are fixed-size, so the cut point is a simple
            # multiple of the per-entry size
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2905
2912
2906
2913
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit selection: use every engine that is available and
        # actually able to produce revlog compressors.
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Raw file handle on the revlog's backing storage; an inline revlog
        # keeps its data inside the index file.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # Same as doread() but reusing one file descriptor across reads.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # Single segment read covering the whole revision range.
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # Read + decompress, one revision at a time.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # Recompress the chunks captured by dochunkbatch() with a specific
        # engine, restoring the revlog's original compressor afterwards.
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3034
3041
3035
3042
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m the positional argument is the revision, not a file.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice the on-disk segments in ``data`` back into one compressed
        # chunk per revision of the (sliced) delta chain, without copying.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # skip over the index entries interleaved with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the inputs each phase benchmark needs: the (possibly
    # sliced) delta chain, the raw on-disk segments, the decompressed
    # chunks and the reconstructed fulltext.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3177
3184
3178
3185
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of building the volatile
    revision set caches on revset execution.  Volatile caches hold the
    filtering and obsolescence related data.  With --contexts, a full
    changectx is obtained for every matched revision instead of just the
    revision number.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        # Optionally drop the volatile caches so each timed call pays
        # the recomputation cost.
        if clear:
            repo.invalidatevolatilesets()
        # repo.set() yields changectx objects, repo.revs() bare revision
        # numbers; either way we merely exhaust the iterator.
        results = repo.set(expr) if contexts else repo.revs(expr)
        for _ in results:
            pass

    timer(run)
    fm.end()
3210
3217
3211
3218
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute):
        # Build a family of timer callables that recompute one volatile
        # set from scratch, optionally dropping the obsstore cache first.
        def bench(name):
            def run():
                repo.invalidatevolatilesets()
                if opts[b'clear_obsstore']:
                    clearfilecache(repo, b'obsstore')
                compute(repo, name)

            return run

        return bench

    def selected(candidates):
        # Sorted candidates, restricted to the requested names (all of
        # them when no name was given on the command line).
        chosen = sorted(candidates)
        if names:
            chosen = [n for n in chosen if n in names]
        return chosen

    # Time the obsolescence-related sets first, then the repoview filters,
    # matching the historical output order.
    obsbench = makebench(obsolete.getrevs)
    for name in selected(obsolete.cachefuncs):
        timer(obsbench(name), title=name)

    filterbench = makebench(repoview.filterrevs)
    for name in selected(repoview.filtertable):
        timer(filterbench(name), title=name)
    fm.end()
3257
3264
3258
3265
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the whole thing is rebuilt
                view._branchcaches.clear()
            else:
                # only invalidate this view's entry; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so that
        # allfilters ends up ordered from smaller to bigger subset
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable branchmap reading/writing for the duration of the benchmark
    # so only the in-memory computation is measured.
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3348
3355
3349
3356
3350 @command(
3357 @command(
3351 b'perfbranchmapupdate',
3358 b'perfbranchmapupdate',
3352 [
3359 [
3353 (b'', b'base', [], b'subset of revision to start from'),
3360 (b'', b'base', [], b'subset of revision to start from'),
3354 (b'', b'target', [], b'subset of revision to end with'),
3361 (b'', b'target', [], b'subset of revision to end with'),
3355 (b'', b'clear-caches', False, b'clear cache between each runs'),
3362 (b'', b'clear-caches', False, b'clear cache between each runs'),
3356 ]
3363 ]
3357 + formatteropts,
3364 + formatteropts,
3358 )
3365 )
3359 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3366 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3360 """benchmark branchmap update from for <base> revs to <target> revs
3367 """benchmark branchmap update from for <base> revs to <target> revs
3361
3368
3362 If `--clear-caches` is passed, the following items will be reset before
3369 If `--clear-caches` is passed, the following items will be reset before
3363 each update:
3370 each update:
3364 * the changelog instance and associated indexes
3371 * the changelog instance and associated indexes
3365 * the rev-branch-cache instance
3372 * the rev-branch-cache instance
3366
3373
3367 Examples:
3374 Examples:
3368
3375
3369 # update for the one last revision
3376 # update for the one last revision
3370 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3377 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3371
3378
3372 $ update for change coming with a new branch
3379 $ update for change coming with a new branch
3373 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3380 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3374 """
3381 """
3375 from mercurial import branchmap
3382 from mercurial import branchmap
3376 from mercurial import repoview
3383 from mercurial import repoview
3377
3384
3378 opts = _byteskwargs(opts)
3385 opts = _byteskwargs(opts)
3379 timer, fm = gettimer(ui, opts)
3386 timer, fm = gettimer(ui, opts)
3380 clearcaches = opts[b'clear_caches']
3387 clearcaches = opts[b'clear_caches']
3381 unfi = repo.unfiltered()
3388 unfi = repo.unfiltered()
3382 x = [None] # used to pass data between closure
3389 x = [None] # used to pass data between closure
3383
3390
3384 # we use a `list` here to avoid possible side effect from smartset
3391 # we use a `list` here to avoid possible side effect from smartset
3385 baserevs = list(scmutil.revrange(repo, base))
3392 baserevs = list(scmutil.revrange(repo, base))
3386 targetrevs = list(scmutil.revrange(repo, target))
3393 targetrevs = list(scmutil.revrange(repo, target))
3387 if not baserevs:
3394 if not baserevs:
3388 raise error.Abort(b'no revisions selected for --base')
3395 raise error.Abort(b'no revisions selected for --base')
3389 if not targetrevs:
3396 if not targetrevs:
3390 raise error.Abort(b'no revisions selected for --target')
3397 raise error.Abort(b'no revisions selected for --target')
3391
3398
3392 # make sure the target branchmap also contains the one in the base
3399 # make sure the target branchmap also contains the one in the base
3393 targetrevs = list(set(baserevs) | set(targetrevs))
3400 targetrevs = list(set(baserevs) | set(targetrevs))
3394 targetrevs.sort()
3401 targetrevs.sort()
3395
3402
3396 cl = repo.changelog
3403 cl = repo.changelog
3397 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3404 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3398 allbaserevs.sort()
3405 allbaserevs.sort()
3399 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3406 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3400
3407
3401 newrevs = list(alltargetrevs.difference(allbaserevs))
3408 newrevs = list(alltargetrevs.difference(allbaserevs))
3402 newrevs.sort()
3409 newrevs.sort()
3403
3410
3404 allrevs = frozenset(unfi.changelog.revs())
3411 allrevs = frozenset(unfi.changelog.revs())
3405 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3412 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3406 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3413 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3407
3414
3408 def basefilter(repo, visibilityexceptions=None):
3415 def basefilter(repo, visibilityexceptions=None):
3409 return basefilterrevs
3416 return basefilterrevs
3410
3417
3411 def targetfilter(repo, visibilityexceptions=None):
3418 def targetfilter(repo, visibilityexceptions=None):
3412 return targetfilterrevs
3419 return targetfilterrevs
3413
3420
3414 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3421 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3415 ui.status(msg % (len(allbaserevs), len(newrevs)))
3422 ui.status(msg % (len(allbaserevs), len(newrevs)))
3416 if targetfilterrevs:
3423 if targetfilterrevs:
3417 msg = b'(%d revisions still filtered)\n'
3424 msg = b'(%d revisions still filtered)\n'
3418 ui.status(msg % len(targetfilterrevs))
3425 ui.status(msg % len(targetfilterrevs))
3419
3426
3420 try:
3427 try:
3421 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3428 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3422 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3429 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3423
3430
3424 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3431 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3425 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3432 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3426
3433
3427 # try to find an existing branchmap to reuse
3434 # try to find an existing branchmap to reuse
3428 subsettable = getbranchmapsubsettable()
3435 subsettable = getbranchmapsubsettable()
3429 candidatefilter = subsettable.get(None)
3436 candidatefilter = subsettable.get(None)
3430 while candidatefilter is not None:
3437 while candidatefilter is not None:
3431 candidatebm = repo.filtered(candidatefilter).branchmap()
3438 candidatebm = repo.filtered(candidatefilter).branchmap()
3432 if candidatebm.validfor(baserepo):
3439 if candidatebm.validfor(baserepo):
3433 filtered = repoview.filterrevs(repo, candidatefilter)
3440 filtered = repoview.filterrevs(repo, candidatefilter)
3434 missing = [r for r in allbaserevs if r in filtered]
3441 missing = [r for r in allbaserevs if r in filtered]
3435 base = candidatebm.copy()
3442 base = candidatebm.copy()
3436 base.update(baserepo, missing)
3443 base.update(baserepo, missing)
3437 break
3444 break
3438 candidatefilter = subsettable.get(candidatefilter)
3445 candidatefilter = subsettable.get(candidatefilter)
3439 else:
3446 else:
3440 # no suitable subset where found
3447 # no suitable subset where found
3441 base = branchmap.branchcache()
3448 base = branchmap.branchcache()
3442 base.update(baserepo, allbaserevs)
3449 base.update(baserepo, allbaserevs)
3443
3450
3444 def setup():
3451 def setup():
3445 x[0] = base.copy()
3452 x[0] = base.copy()
3446 if clearcaches:
3453 if clearcaches:
3447 unfi._revbranchcache = None
3454 unfi._revbranchcache = None
3448 clearchangelog(repo)
3455 clearchangelog(repo)
3449
3456
3450 def bench():
3457 def bench():
3451 x[0].update(targetrepo, newrevs)
3458 x[0].update(targetrepo, newrevs)
3452
3459
3453 timer(bench, setup=setup)
3460 timer(bench, setup=setup)
3454 fm.end()
3461 fm.end()
3455 finally:
3462 finally:
3456 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3463 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3457 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3464 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3458
3465
3459
3466
3460 @command(
3467 @command(
3461 b'perfbranchmapload',
3468 b'perfbranchmapload',
3462 [
3469 [
3463 (b'f', b'filter', b'', b'Specify repoview filter'),
3470 (b'f', b'filter', b'', b'Specify repoview filter'),
3464 (b'', b'list', False, b'List brachmap filter caches'),
3471 (b'', b'list', False, b'List brachmap filter caches'),
3465 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3472 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3466 ]
3473 ]
3467 + formatteropts,
3474 + formatteropts,
3468 )
3475 )
3469 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3476 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3470 """benchmark reading the branchmap"""
3477 """benchmark reading the branchmap"""
3471 opts = _byteskwargs(opts)
3478 opts = _byteskwargs(opts)
3472 clearrevlogs = opts[b'clear_revlogs']
3479 clearrevlogs = opts[b'clear_revlogs']
3473
3480
3474 if list:
3481 if list:
3475 for name, kind, st in repo.cachevfs.readdir(stat=True):
3482 for name, kind, st in repo.cachevfs.readdir(stat=True):
3476 if name.startswith(b'branch2'):
3483 if name.startswith(b'branch2'):
3477 filtername = name.partition(b'-')[2] or b'unfiltered'
3484 filtername = name.partition(b'-')[2] or b'unfiltered'
3478 ui.status(
3485 ui.status(
3479 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3486 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3480 )
3487 )
3481 return
3488 return
3482 if not filter:
3489 if not filter:
3483 filter = None
3490 filter = None
3484 subsettable = getbranchmapsubsettable()
3491 subsettable = getbranchmapsubsettable()
3485 if filter is None:
3492 if filter is None:
3486 repo = repo.unfiltered()
3493 repo = repo.unfiltered()
3487 else:
3494 else:
3488 repo = repoview.repoview(repo, filter)
3495 repo = repoview.repoview(repo, filter)
3489
3496
3490 repo.branchmap() # make sure we have a relevant, up to date branchmap
3497 repo.branchmap() # make sure we have a relevant, up to date branchmap
3491
3498
3492 try:
3499 try:
3493 fromfile = branchmap.branchcache.fromfile
3500 fromfile = branchmap.branchcache.fromfile
3494 except AttributeError:
3501 except AttributeError:
3495 # older versions
3502 # older versions
3496 fromfile = branchmap.read
3503 fromfile = branchmap.read
3497
3504
3498 currentfilter = filter
3505 currentfilter = filter
3499 # try once without timer, the filter may not be cached
3506 # try once without timer, the filter may not be cached
3500 while fromfile(repo) is None:
3507 while fromfile(repo) is None:
3501 currentfilter = subsettable.get(currentfilter)
3508 currentfilter = subsettable.get(currentfilter)
3502 if currentfilter is None:
3509 if currentfilter is None:
3503 raise error.Abort(
3510 raise error.Abort(
3504 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3511 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3505 )
3512 )
3506 repo = repo.filtered(currentfilter)
3513 repo = repo.filtered(currentfilter)
3507 timer, fm = gettimer(ui, opts)
3514 timer, fm = gettimer(ui, opts)
3508
3515
3509 def setup():
3516 def setup():
3510 if clearrevlogs:
3517 if clearrevlogs:
3511 clearchangelog(repo)
3518 clearchangelog(repo)
3512
3519
3513 def bench():
3520 def bench():
3514 fromfile(repo)
3521 fromfile(repo)
3515
3522
3516 timer(bench, setup=setup)
3523 timer(bench, setup=setup)
3517 fm.end()
3524 fm.end()
3518
3525
3519
3526
3520 @command(b'perfloadmarkers')
3527 @command(b'perfloadmarkers')
3521 def perfloadmarkers(ui, repo):
3528 def perfloadmarkers(ui, repo):
3522 """benchmark the time to parse the on-disk markers for a repo
3529 """benchmark the time to parse the on-disk markers for a repo
3523
3530
3524 Result is the number of markers in the repo."""
3531 Result is the number of markers in the repo."""
3525 timer, fm = gettimer(ui)
3532 timer, fm = gettimer(ui)
3526 svfs = getsvfs(repo)
3533 svfs = getsvfs(repo)
3527 timer(lambda: len(obsolete.obsstore(svfs)))
3534 timer(lambda: len(obsolete.obsstore(svfs)))
3528 fm.end()
3535 fm.end()
3529
3536
3530
3537
3531 @command(
3538 @command(
3532 b'perflrucachedict',
3539 b'perflrucachedict',
3533 formatteropts
3540 formatteropts
3534 + [
3541 + [
3535 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3542 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3536 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3543 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3537 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3544 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3538 (b'', b'size', 4, b'size of cache'),
3545 (b'', b'size', 4, b'size of cache'),
3539 (b'', b'gets', 10000, b'number of key lookups'),
3546 (b'', b'gets', 10000, b'number of key lookups'),
3540 (b'', b'sets', 10000, b'number of key sets'),
3547 (b'', b'sets', 10000, b'number of key sets'),
3541 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3548 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3542 (
3549 (
3543 b'',
3550 b'',
3544 b'mixedgetfreq',
3551 b'mixedgetfreq',
3545 50,
3552 50,
3546 b'frequency of get vs set ops in mixed mode',
3553 b'frequency of get vs set ops in mixed mode',
3547 ),
3554 ),
3548 ],
3555 ],
3549 norepo=True,
3556 norepo=True,
3550 )
3557 )
3551 def perflrucache(
3558 def perflrucache(
3552 ui,
3559 ui,
3553 mincost=0,
3560 mincost=0,
3554 maxcost=100,
3561 maxcost=100,
3555 costlimit=0,
3562 costlimit=0,
3556 size=4,
3563 size=4,
3557 gets=10000,
3564 gets=10000,
3558 sets=10000,
3565 sets=10000,
3559 mixed=10000,
3566 mixed=10000,
3560 mixedgetfreq=50,
3567 mixedgetfreq=50,
3561 **opts
3568 **opts
3562 ):
3569 ):
3563 opts = _byteskwargs(opts)
3570 opts = _byteskwargs(opts)
3564
3571
3565 def doinit():
3572 def doinit():
3566 for i in _xrange(10000):
3573 for i in _xrange(10000):
3567 util.lrucachedict(size)
3574 util.lrucachedict(size)
3568
3575
3569 costrange = list(range(mincost, maxcost + 1))
3576 costrange = list(range(mincost, maxcost + 1))
3570
3577
3571 values = []
3578 values = []
3572 for i in _xrange(size):
3579 for i in _xrange(size):
3573 values.append(random.randint(0, _maxint))
3580 values.append(random.randint(0, _maxint))
3574
3581
3575 # Get mode fills the cache and tests raw lookup performance with no
3582 # Get mode fills the cache and tests raw lookup performance with no
3576 # eviction.
3583 # eviction.
3577 getseq = []
3584 getseq = []
3578 for i in _xrange(gets):
3585 for i in _xrange(gets):
3579 getseq.append(random.choice(values))
3586 getseq.append(random.choice(values))
3580
3587
3581 def dogets():
3588 def dogets():
3582 d = util.lrucachedict(size)
3589 d = util.lrucachedict(size)
3583 for v in values:
3590 for v in values:
3584 d[v] = v
3591 d[v] = v
3585 for key in getseq:
3592 for key in getseq:
3586 value = d[key]
3593 value = d[key]
3587 value # silence pyflakes warning
3594 value # silence pyflakes warning
3588
3595
3589 def dogetscost():
3596 def dogetscost():
3590 d = util.lrucachedict(size, maxcost=costlimit)
3597 d = util.lrucachedict(size, maxcost=costlimit)
3591 for i, v in enumerate(values):
3598 for i, v in enumerate(values):
3592 d.insert(v, v, cost=costs[i])
3599 d.insert(v, v, cost=costs[i])
3593 for key in getseq:
3600 for key in getseq:
3594 try:
3601 try:
3595 value = d[key]
3602 value = d[key]
3596 value # silence pyflakes warning
3603 value # silence pyflakes warning
3597 except KeyError:
3604 except KeyError:
3598 pass
3605 pass
3599
3606
3600 # Set mode tests insertion speed with cache eviction.
3607 # Set mode tests insertion speed with cache eviction.
3601 setseq = []
3608 setseq = []
3602 costs = []
3609 costs = []
3603 for i in _xrange(sets):
3610 for i in _xrange(sets):
3604 setseq.append(random.randint(0, _maxint))
3611 setseq.append(random.randint(0, _maxint))
3605 costs.append(random.choice(costrange))
3612 costs.append(random.choice(costrange))
3606
3613
3607 def doinserts():
3614 def doinserts():
3608 d = util.lrucachedict(size)
3615 d = util.lrucachedict(size)
3609 for v in setseq:
3616 for v in setseq:
3610 d.insert(v, v)
3617 d.insert(v, v)
3611
3618
3612 def doinsertscost():
3619 def doinsertscost():
3613 d = util.lrucachedict(size, maxcost=costlimit)
3620 d = util.lrucachedict(size, maxcost=costlimit)
3614 for i, v in enumerate(setseq):
3621 for i, v in enumerate(setseq):
3615 d.insert(v, v, cost=costs[i])
3622 d.insert(v, v, cost=costs[i])
3616
3623
3617 def dosets():
3624 def dosets():
3618 d = util.lrucachedict(size)
3625 d = util.lrucachedict(size)
3619 for v in setseq:
3626 for v in setseq:
3620 d[v] = v
3627 d[v] = v
3621
3628
3622 # Mixed mode randomly performs gets and sets with eviction.
3629 # Mixed mode randomly performs gets and sets with eviction.
3623 mixedops = []
3630 mixedops = []
3624 for i in _xrange(mixed):
3631 for i in _xrange(mixed):
3625 r = random.randint(0, 100)
3632 r = random.randint(0, 100)
3626 if r < mixedgetfreq:
3633 if r < mixedgetfreq:
3627 op = 0
3634 op = 0
3628 else:
3635 else:
3629 op = 1
3636 op = 1
3630
3637
3631 mixedops.append(
3638 mixedops.append(
3632 (op, random.randint(0, size * 2), random.choice(costrange))
3639 (op, random.randint(0, size * 2), random.choice(costrange))
3633 )
3640 )
3634
3641
3635 def domixed():
3642 def domixed():
3636 d = util.lrucachedict(size)
3643 d = util.lrucachedict(size)
3637
3644
3638 for op, v, cost in mixedops:
3645 for op, v, cost in mixedops:
3639 if op == 0:
3646 if op == 0:
3640 try:
3647 try:
3641 d[v]
3648 d[v]
3642 except KeyError:
3649 except KeyError:
3643 pass
3650 pass
3644 else:
3651 else:
3645 d[v] = v
3652 d[v] = v
3646
3653
3647 def domixedcost():
3654 def domixedcost():
3648 d = util.lrucachedict(size, maxcost=costlimit)
3655 d = util.lrucachedict(size, maxcost=costlimit)
3649
3656
3650 for op, v, cost in mixedops:
3657 for op, v, cost in mixedops:
3651 if op == 0:
3658 if op == 0:
3652 try:
3659 try:
3653 d[v]
3660 d[v]
3654 except KeyError:
3661 except KeyError:
3655 pass
3662 pass
3656 else:
3663 else:
3657 d.insert(v, v, cost=cost)
3664 d.insert(v, v, cost=cost)
3658
3665
3659 benches = [
3666 benches = [
3660 (doinit, b'init'),
3667 (doinit, b'init'),
3661 ]
3668 ]
3662
3669
3663 if costlimit:
3670 if costlimit:
3664 benches.extend(
3671 benches.extend(
3665 [
3672 [
3666 (dogetscost, b'gets w/ cost limit'),
3673 (dogetscost, b'gets w/ cost limit'),
3667 (doinsertscost, b'inserts w/ cost limit'),
3674 (doinsertscost, b'inserts w/ cost limit'),
3668 (domixedcost, b'mixed w/ cost limit'),
3675 (domixedcost, b'mixed w/ cost limit'),
3669 ]
3676 ]
3670 )
3677 )
3671 else:
3678 else:
3672 benches.extend(
3679 benches.extend(
3673 [
3680 [
3674 (dogets, b'gets'),
3681 (dogets, b'gets'),
3675 (doinserts, b'inserts'),
3682 (doinserts, b'inserts'),
3676 (dosets, b'sets'),
3683 (dosets, b'sets'),
3677 (domixed, b'mixed'),
3684 (domixed, b'mixed'),
3678 ]
3685 ]
3679 )
3686 )
3680
3687
3681 for fn, title in benches:
3688 for fn, title in benches:
3682 timer, fm = gettimer(ui, opts)
3689 timer, fm = gettimer(ui, opts)
3683 timer(fn, title=title)
3690 timer(fn, title=title)
3684 fm.end()
3691 fm.end()
3685
3692
3686
3693
3687 @command(b'perfwrite', formatteropts)
3694 @command(b'perfwrite', formatteropts)
3688 def perfwrite(ui, repo, **opts):
3695 def perfwrite(ui, repo, **opts):
3689 """microbenchmark ui.write
3696 """microbenchmark ui.write
3690 """
3697 """
3691 opts = _byteskwargs(opts)
3698 opts = _byteskwargs(opts)
3692
3699
3693 timer, fm = gettimer(ui, opts)
3700 timer, fm = gettimer(ui, opts)
3694
3701
3695 def write():
3702 def write():
3696 for i in range(100000):
3703 for i in range(100000):
3697 ui.writenoi18n(b'Testing write performance\n')
3704 ui.writenoi18n(b'Testing write performance\n')
3698
3705
3699 timer(write)
3706 timer(write)
3700 fm.end()
3707 fm.end()
3701
3708
3702
3709
3703 def uisetup(ui):
3710 def uisetup(ui):
3704 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3711 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3705 commands, b'debugrevlogopts'
3712 commands, b'debugrevlogopts'
3706 ):
3713 ):
3707 # for "historical portability":
3714 # for "historical portability":
3708 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3715 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3709 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3716 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3710 # openrevlog() should cause failure, because it has been
3717 # openrevlog() should cause failure, because it has been
3711 # available since 3.5 (or 49c583ca48c4).
3718 # available since 3.5 (or 49c583ca48c4).
3712 def openrevlog(orig, repo, cmd, file_, opts):
3719 def openrevlog(orig, repo, cmd, file_, opts):
3713 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3720 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3714 raise error.Abort(
3721 raise error.Abort(
3715 b"This version doesn't support --dir option",
3722 b"This version doesn't support --dir option",
3716 hint=b"use 3.5 or later",
3723 hint=b"use 3.5 or later",
3717 )
3724 )
3718 return orig(repo, cmd, file_, opts)
3725 return orig(repo, cmd, file_, opts)
3719
3726
3720 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3727 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3721
3728
3722
3729
3723 @command(
3730 @command(
3724 b'perfprogress',
3731 b'perfprogress',
3725 formatteropts
3732 formatteropts
3726 + [
3733 + [
3727 (b'', b'topic', b'topic', b'topic for progress messages'),
3734 (b'', b'topic', b'topic', b'topic for progress messages'),
3728 (b'c', b'total', 1000000, b'total value we are progressing to'),
3735 (b'c', b'total', 1000000, b'total value we are progressing to'),
3729 ],
3736 ],
3730 norepo=True,
3737 norepo=True,
3731 )
3738 )
3732 def perfprogress(ui, topic=None, total=None, **opts):
3739 def perfprogress(ui, topic=None, total=None, **opts):
3733 """printing of progress bars"""
3740 """printing of progress bars"""
3734 opts = _byteskwargs(opts)
3741 opts = _byteskwargs(opts)
3735
3742
3736 timer, fm = gettimer(ui, opts)
3743 timer, fm = gettimer(ui, opts)
3737
3744
3738 def doprogress():
3745 def doprogress():
3739 with ui.makeprogress(topic, total=total) as progress:
3746 with ui.makeprogress(topic, total=total) as progress:
3740 for i in _xrange(total):
3747 for i in _xrange(total):
3741 progress.increment()
3748 progress.increment()
3742
3749
3743 timer(doprogress)
3750 timer(doprogress)
3744 fm.end()
3751 fm.end()
@@ -1,396 +1,396 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perfaddremove
81 perfaddremove
82 (no help text available)
82 (no help text available)
83 perfancestors
83 perfancestors
84 (no help text available)
84 (no help text available)
85 perfancestorset
85 perfancestorset
86 (no help text available)
86 (no help text available)
87 perfannotate (no help text available)
87 perfannotate (no help text available)
88 perfbdiff benchmark a bdiff between revisions
88 perfbdiff benchmark a bdiff between revisions
89 perfbookmarks
89 perfbookmarks
90 benchmark parsing bookmarks from disk to memory
90 benchmark parsing bookmarks from disk to memory
91 perfbranchmap
91 perfbranchmap
92 benchmark the update of a branchmap
92 benchmark the update of a branchmap
93 perfbranchmapload
93 perfbranchmapload
94 benchmark reading the branchmap
94 benchmark reading the branchmap
95 perfbranchmapupdate
95 perfbranchmapupdate
96 benchmark branchmap update from for <base> revs to <target>
96 benchmark branchmap update from for <base> revs to <target>
97 revs
97 revs
98 perfbundleread
98 perfbundleread
99 Benchmark reading of bundle files.
99 Benchmark reading of bundle files.
100 perfcca (no help text available)
100 perfcca (no help text available)
101 perfchangegroupchangelog
101 perfchangegroupchangelog
102 Benchmark producing a changelog group for a changegroup.
102 Benchmark producing a changelog group for a changegroup.
103 perfchangeset
103 perfchangeset
104 (no help text available)
104 (no help text available)
105 perfctxfiles (no help text available)
105 perfctxfiles (no help text available)
106 perfdiffwd Profile diff of working directory changes
106 perfdiffwd Profile diff of working directory changes
107 perfdirfoldmap
107 perfdirfoldmap
108 (no help text available)
108 (no help text available)
109 perfdirs (no help text available)
109 perfdirs (no help text available)
110 perfdirstate (no help text available)
110 perfdirstate (no help text available)
111 perfdirstatedirs
111 perfdirstatedirs
112 (no help text available)
112 (no help text available)
113 perfdirstatefoldmap
113 perfdirstatefoldmap
114 (no help text available)
114 (no help text available)
115 perfdirstatewrite
115 perfdirstatewrite
116 (no help text available)
116 (no help text available)
117 perfdiscovery
117 perfdiscovery
118 benchmark discovery between local repo and the peer at given
118 benchmark discovery between local repo and the peer at given
119 path
119 path
120 perffncacheencode
120 perffncacheencode
121 (no help text available)
121 (no help text available)
122 perffncacheload
122 perffncacheload
123 (no help text available)
123 (no help text available)
124 perffncachewrite
124 perffncachewrite
125 (no help text available)
125 (no help text available)
126 perfheads benchmark the computation of a changelog heads
126 perfheads benchmark the computation of a changelog heads
127 perfhelper-mergecopies
127 perfhelper-mergecopies
128 find statistics about potential parameters for
128 find statistics about potential parameters for
129 'perfmergecopies'
129 'perfmergecopies'
130 perfhelper-pathcopies
130 perfhelper-pathcopies
131 find statistic about potential parameters for the
131 find statistic about potential parameters for the
132 'perftracecopies'
132 'perftracecopies'
133 perfignore benchmark operation related to computing ignore
133 perfignore benchmark operation related to computing ignore
134 perfindex benchmark index creation time followed by a lookup
134 perfindex benchmark index creation time followed by a lookup
135 perflinelogedits
135 perflinelogedits
136 (no help text available)
136 (no help text available)
137 perfloadmarkers
137 perfloadmarkers
138 benchmark the time to parse the on-disk markers for a repo
138 benchmark the time to parse the on-disk markers for a repo
139 perflog (no help text available)
139 perflog (no help text available)
140 perflookup (no help text available)
140 perflookup (no help text available)
141 perflrucachedict
141 perflrucachedict
142 (no help text available)
142 (no help text available)
143 perfmanifest benchmark the time to read a manifest from disk and return a
143 perfmanifest benchmark the time to read a manifest from disk and return a
144 usable
144 usable
145 perfmergecalculate
145 perfmergecalculate
146 (no help text available)
146 (no help text available)
147 perfmergecopies
147 perfmergecopies
148 measure runtime of 'copies.mergecopies'
148 measure runtime of 'copies.mergecopies'
149 perfmoonwalk benchmark walking the changelog backwards
149 perfmoonwalk benchmark walking the changelog backwards
150 perfnodelookup
150 perfnodelookup
151 (no help text available)
151 (no help text available)
152 perfnodemap benchmark the time necessary to look up revision from a cold
152 perfnodemap benchmark the time necessary to look up revision from a cold
153 nodemap
153 nodemap
154 perfparents benchmark the time necessary to fetch one changeset's parents.
154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 perfpathcopies
155 perfpathcopies
156 benchmark the copy tracing logic
156 benchmark the copy tracing logic
157 perfphases benchmark phasesets computation
157 perfphases benchmark phasesets computation
158 perfphasesremote
158 perfphasesremote
159 benchmark time needed to analyse phases of the remote server
159 benchmark time needed to analyse phases of the remote server
160 perfprogress printing of progress bars
160 perfprogress printing of progress bars
161 perfrawfiles (no help text available)
161 perfrawfiles (no help text available)
162 perfrevlogchunks
162 perfrevlogchunks
163 Benchmark operations on revlog chunks.
163 Benchmark operations on revlog chunks.
164 perfrevlogindex
164 perfrevlogindex
165 Benchmark operations against a revlog index.
165 Benchmark operations against a revlog index.
166 perfrevlogrevision
166 perfrevlogrevision
167 Benchmark obtaining a revlog revision.
167 Benchmark obtaining a revlog revision.
168 perfrevlogrevisions
168 perfrevlogrevisions
169 Benchmark reading a series of revisions from a revlog.
169 Benchmark reading a series of revisions from a revlog.
170 perfrevlogwrite
170 perfrevlogwrite
171 Benchmark writing a series of revisions to a revlog.
171 Benchmark writing a series of revisions to a revlog.
172 perfrevrange (no help text available)
172 perfrevrange (no help text available)
173 perfrevset benchmark the execution time of a revset
173 perfrevset benchmark the execution time of a revset
174 perfstartup (no help text available)
174 perfstartup (no help text available)
175 perfstatus (no help text available)
175 perfstatus benchmark the performance of a single status call
176 perftags (no help text available)
176 perftags (no help text available)
177 perftemplating
177 perftemplating
178 test the rendering time of a given template
178 test the rendering time of a given template
179 perfunidiff benchmark a unified diff between revisions
179 perfunidiff benchmark a unified diff between revisions
180 perfvolatilesets
180 perfvolatilesets
181 benchmark the computation of various volatile set
181 benchmark the computation of various volatile set
182 perfwalk (no help text available)
182 perfwalk (no help text available)
183 perfwrite microbenchmark ui.write
183 perfwrite microbenchmark ui.write
184
184
185 (use 'hg help -v perf' to show built-in aliases and global options)
185 (use 'hg help -v perf' to show built-in aliases and global options)
186 $ hg perfaddremove
186 $ hg perfaddremove
187 $ hg perfancestors
187 $ hg perfancestors
188 $ hg perfancestorset 2
188 $ hg perfancestorset 2
189 $ hg perfannotate a
189 $ hg perfannotate a
190 $ hg perfbdiff -c 1
190 $ hg perfbdiff -c 1
191 $ hg perfbdiff --alldata 1
191 $ hg perfbdiff --alldata 1
192 $ hg perfunidiff -c 1
192 $ hg perfunidiff -c 1
193 $ hg perfunidiff --alldata 1
193 $ hg perfunidiff --alldata 1
194 $ hg perfbookmarks
194 $ hg perfbookmarks
195 $ hg perfbranchmap
195 $ hg perfbranchmap
196 $ hg perfbranchmapload
196 $ hg perfbranchmapload
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 benchmark of branchmap with 3 revisions with 1 new ones
198 benchmark of branchmap with 3 revisions with 1 new ones
199 $ hg perfcca
199 $ hg perfcca
200 $ hg perfchangegroupchangelog
200 $ hg perfchangegroupchangelog
201 $ hg perfchangegroupchangelog --cgversion 01
201 $ hg perfchangegroupchangelog --cgversion 01
202 $ hg perfchangeset 2
202 $ hg perfchangeset 2
203 $ hg perfctxfiles 2
203 $ hg perfctxfiles 2
204 $ hg perfdiffwd
204 $ hg perfdiffwd
205 $ hg perfdirfoldmap
205 $ hg perfdirfoldmap
206 $ hg perfdirs
206 $ hg perfdirs
207 $ hg perfdirstate
207 $ hg perfdirstate
208 $ hg perfdirstatedirs
208 $ hg perfdirstatedirs
209 $ hg perfdirstatefoldmap
209 $ hg perfdirstatefoldmap
210 $ hg perfdirstatewrite
210 $ hg perfdirstatewrite
211 #if repofncache
211 #if repofncache
212 $ hg perffncacheencode
212 $ hg perffncacheencode
213 $ hg perffncacheload
213 $ hg perffncacheload
214 $ hg debugrebuildfncache
214 $ hg debugrebuildfncache
215 fncache already up to date
215 fncache already up to date
216 $ hg perffncachewrite
216 $ hg perffncachewrite
217 $ hg debugrebuildfncache
217 $ hg debugrebuildfncache
218 fncache already up to date
218 fncache already up to date
219 #endif
219 #endif
220 $ hg perfheads
220 $ hg perfheads
221 $ hg perfignore
221 $ hg perfignore
222 $ hg perfindex
222 $ hg perfindex
223 $ hg perflinelogedits -n 1
223 $ hg perflinelogedits -n 1
224 $ hg perfloadmarkers
224 $ hg perfloadmarkers
225 $ hg perflog
225 $ hg perflog
226 $ hg perflookup 2
226 $ hg perflookup 2
227 $ hg perflrucache
227 $ hg perflrucache
228 $ hg perfmanifest 2
228 $ hg perfmanifest 2
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
230 $ hg perfmanifest -m 44fe2c8352bb
230 $ hg perfmanifest -m 44fe2c8352bb
231 abort: manifest revision must be integer or full node
231 abort: manifest revision must be integer or full node
232 [255]
232 [255]
233 $ hg perfmergecalculate -r 3
233 $ hg perfmergecalculate -r 3
234 $ hg perfmoonwalk
234 $ hg perfmoonwalk
235 $ hg perfnodelookup 2
235 $ hg perfnodelookup 2
236 $ hg perfpathcopies 1 2
236 $ hg perfpathcopies 1 2
237 $ hg perfprogress --total 1000
237 $ hg perfprogress --total 1000
238 $ hg perfrawfiles 2
238 $ hg perfrawfiles 2
239 $ hg perfrevlogindex -c
239 $ hg perfrevlogindex -c
240 #if reporevlogstore
240 #if reporevlogstore
241 $ hg perfrevlogrevisions .hg/store/data/a.i
241 $ hg perfrevlogrevisions .hg/store/data/a.i
242 #endif
242 #endif
243 $ hg perfrevlogrevision -m 0
243 $ hg perfrevlogrevision -m 0
244 $ hg perfrevlogchunks -c
244 $ hg perfrevlogchunks -c
245 $ hg perfrevrange
245 $ hg perfrevrange
246 $ hg perfrevset 'all()'
246 $ hg perfrevset 'all()'
247 $ hg perfstartup
247 $ hg perfstartup
248 $ hg perfstatus
248 $ hg perfstatus
249 $ hg perftags
249 $ hg perftags
250 $ hg perftemplating
250 $ hg perftemplating
251 $ hg perfvolatilesets
251 $ hg perfvolatilesets
252 $ hg perfwalk
252 $ hg perfwalk
253 $ hg perfparents
253 $ hg perfparents
254 $ hg perfdiscovery -q .
254 $ hg perfdiscovery -q .
255
255
256 Test run control
256 Test run control
257 ----------------
257 ----------------
258
258
259 Simple single entry
259 Simple single entry
260
260
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
262 ! wall * comb * user * sys * (best of 15) (glob)
262 ! wall * comb * user * sys * (best of 15) (glob)
263
263
264 Multiple entries
264 Multiple entries
265
265
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
267 ! wall * comb * user * sys * (best of 5) (glob)
267 ! wall * comb * user * sys * (best of 5) (glob)
268
268
269 error case are ignored
269 error case are ignored
270
270
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
272 malformatted run limit entry, missing "-": 500
272 malformatted run limit entry, missing "-": 500
273 ! wall * comb * user * sys * (best of 5) (glob)
273 ! wall * comb * user * sys * (best of 5) (glob)
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
277 ! wall * comb * user * sys * (best of 5) (glob)
277 ! wall * comb * user * sys * (best of 5) (glob)
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
280 ! wall * comb * user * sys * (best of 5) (glob)
280 ! wall * comb * user * sys * (best of 5) (glob)
281
281
282 test actual output
282 test actual output
283 ------------------
283 ------------------
284
284
285 normal output:
285 normal output:
286
286
287 $ hg perfheads --config perf.stub=no
287 $ hg perfheads --config perf.stub=no
288 ! wall * comb * user * sys * (best of *) (glob)
288 ! wall * comb * user * sys * (best of *) (glob)
289
289
290 detailed output:
290 detailed output:
291
291
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
293 ! wall * comb * user * sys * (best of *) (glob)
293 ! wall * comb * user * sys * (best of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
294 ! wall * comb * user * sys * (max of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
295 ! wall * comb * user * sys * (avg of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
296 ! wall * comb * user * sys * (median of *) (glob)
297
297
298 test json output
298 test json output
299 ----------------
299 ----------------
300
300
301 normal output:
301 normal output:
302
302
303 $ hg perfheads --template json --config perf.stub=no
303 $ hg perfheads --template json --config perf.stub=no
304 [
304 [
305 {
305 {
306 "comb": *, (glob)
306 "comb": *, (glob)
307 "count": *, (glob)
307 "count": *, (glob)
308 "sys": *, (glob)
308 "sys": *, (glob)
309 "user": *, (glob)
309 "user": *, (glob)
310 "wall": * (glob)
310 "wall": * (glob)
311 }
311 }
312 ]
312 ]
313
313
314 detailed output:
314 detailed output:
315
315
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
317 [
317 [
318 {
318 {
319 "avg.comb": *, (glob)
319 "avg.comb": *, (glob)
320 "avg.count": *, (glob)
320 "avg.count": *, (glob)
321 "avg.sys": *, (glob)
321 "avg.sys": *, (glob)
322 "avg.user": *, (glob)
322 "avg.user": *, (glob)
323 "avg.wall": *, (glob)
323 "avg.wall": *, (glob)
324 "comb": *, (glob)
324 "comb": *, (glob)
325 "count": *, (glob)
325 "count": *, (glob)
326 "max.comb": *, (glob)
326 "max.comb": *, (glob)
327 "max.count": *, (glob)
327 "max.count": *, (glob)
328 "max.sys": *, (glob)
328 "max.sys": *, (glob)
329 "max.user": *, (glob)
329 "max.user": *, (glob)
330 "max.wall": *, (glob)
330 "max.wall": *, (glob)
331 "median.comb": *, (glob)
331 "median.comb": *, (glob)
332 "median.count": *, (glob)
332 "median.count": *, (glob)
333 "median.sys": *, (glob)
333 "median.sys": *, (glob)
334 "median.user": *, (glob)
334 "median.user": *, (glob)
335 "median.wall": *, (glob)
335 "median.wall": *, (glob)
336 "sys": *, (glob)
336 "sys": *, (glob)
337 "user": *, (glob)
337 "user": *, (glob)
338 "wall": * (glob)
338 "wall": * (glob)
339 }
339 }
340 ]
340 ]
341
341
342 Test pre-run feature
342 Test pre-run feature
343 --------------------
343 --------------------
344
344
345 (perf discovery has some spurious output)
345 (perf discovery has some spurious output)
346
346
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
348 ! wall * comb * user * sys * (best of 1) (glob)
348 ! wall * comb * user * sys * (best of 1) (glob)
349 searching for changes
349 searching for changes
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
351 ! wall * comb * user * sys * (best of 1) (glob)
351 ! wall * comb * user * sys * (best of 1) (glob)
352 searching for changes
352 searching for changes
353 searching for changes
353 searching for changes
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
355 ! wall * comb * user * sys * (best of 1) (glob)
355 ! wall * comb * user * sys * (best of 1) (glob)
356 searching for changes
356 searching for changes
357 searching for changes
357 searching for changes
358 searching for changes
358 searching for changes
359 searching for changes
359 searching for changes
360
360
361 test profile-benchmark option
361 test profile-benchmark option
362 ------------------------------
362 ------------------------------
363
363
364 Function to check that statprof ran
364 Function to check that statprof ran
365 $ statprofran () {
365 $ statprofran () {
366 > egrep 'Sample count:|No samples recorded' > /dev/null
366 > egrep 'Sample count:|No samples recorded' > /dev/null
367 > }
367 > }
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
369
369
370 Check perf.py for historical portability
370 Check perf.py for historical portability
371 ----------------------------------------
371 ----------------------------------------
372
372
373 $ cd "$TESTDIR/.."
373 $ cd "$TESTDIR/.."
374
374
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
378 contrib/perf.py:\d+: (re)
378 contrib/perf.py:\d+: (re)
379 > from mercurial import (
379 > from mercurial import (
380 import newer module separately in try clause for early Mercurial
380 import newer module separately in try clause for early Mercurial
381 contrib/perf.py:\d+: (re)
381 contrib/perf.py:\d+: (re)
382 > from mercurial import (
382 > from mercurial import (
383 import newer module separately in try clause for early Mercurial
383 import newer module separately in try clause for early Mercurial
384 contrib/perf.py:\d+: (re)
384 contrib/perf.py:\d+: (re)
385 > origindexpath = orig.opener.join(orig.indexfile)
385 > origindexpath = orig.opener.join(orig.indexfile)
386 use getvfs()/getsvfs() for early Mercurial
386 use getvfs()/getsvfs() for early Mercurial
387 contrib/perf.py:\d+: (re)
387 contrib/perf.py:\d+: (re)
388 > origdatapath = orig.opener.join(orig.datafile)
388 > origdatapath = orig.opener.join(orig.datafile)
389 use getvfs()/getsvfs() for early Mercurial
389 use getvfs()/getsvfs() for early Mercurial
390 contrib/perf.py:\d+: (re)
390 contrib/perf.py:\d+: (re)
391 > vfs = vfsmod.vfs(tmpdir)
391 > vfs = vfsmod.vfs(tmpdir)
392 use getvfs()/getsvfs() for early Mercurial
392 use getvfs()/getsvfs() for early Mercurial
393 contrib/perf.py:\d+: (re)
393 contrib/perf.py:\d+: (re)
394 > vfs.options = getattr(orig.opener, 'options', None)
394 > vfs.options = getattr(orig.opener, 'options', None)
395 use getvfs()/getsvfs() for early Mercurial
395 use getvfs()/getsvfs() for early Mercurial
396 [1]
396 [1]
General Comments 0
You need to be logged in to leave comments. Login now