##// END OF EJS Templates
perf-unbundle: add a perf command to time the unbundle operation...
marmoute -
r50309:3c5d0f87 default
parent child Browse files
Show More
@@ -1,4071 +1,4116 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance
2 '''helper extension to measure performance
3
3
4 Configurations
4 Configurations
5 ==============
5 ==============
6
6
7 ``perf``
7 ``perf``
8 --------
8 --------
9
9
10 ``all-timing``
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median average. If not set only the best timing is reported
12 worst, median average. If not set only the best timing is reported
13 (default: off).
13 (default: off).
14
14
15 ``presleep``
15 ``presleep``
16 number of second to wait before any group of runs (default: 1)
16 number of second to wait before any group of runs (default: 1)
17
17
18 ``pre-run``
18 ``pre-run``
19 number of run to perform before starting measurement.
19 number of run to perform before starting measurement.
20
20
21 ``profile-benchmark``
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (The first iteration is benchmarked)
24
24
25 ``run-limits``
25 ``run-limits``
26 Control the number of runs each benchmark will perform. The option value
26 Control the number of runs each benchmark will perform. The option value
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 conditions are considered in order with the following logic:
28 conditions are considered in order with the following logic:
29
29
30 If benchmark has been running for <time> seconds, and we have performed
30 If benchmark has been running for <time> seconds, and we have performed
31 <numberofrun> iterations, stop the benchmark,
31 <numberofrun> iterations, stop the benchmark,
32
32
33 The default value is: `3.0-100, 10.0-3`
33 The default value is: `3.0-100, 10.0-3`
34
34
35 ``stub``
35 ``stub``
36 When set, benchmarks will only be run once, useful for testing
36 When set, benchmarks will only be run once, useful for testing
37 (default: off)
37 (default: off)
38 '''
38 '''
39
39
40 # "historical portability" policy of perf.py:
40 # "historical portability" policy of perf.py:
41 #
41 #
42 # We have to do:
42 # We have to do:
43 # - make perf.py "loadable" with as wide Mercurial version as possible
43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 # This doesn't mean that perf commands work correctly with that Mercurial.
44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 # - make historical perf command work correctly with as wide Mercurial
46 # - make historical perf command work correctly with as wide Mercurial
47 # version as possible
47 # version as possible
48 #
48 #
49 # We have to do, if possible with reasonable cost:
49 # We have to do, if possible with reasonable cost:
50 # - make recent perf command for historical feature work correctly
50 # - make recent perf command for historical feature work correctly
51 # with early Mercurial
51 # with early Mercurial
52 #
52 #
53 # We don't have to do:
53 # We don't have to do:
54 # - make perf command for recent feature work correctly with early
54 # - make perf command for recent feature work correctly with early
55 # Mercurial
55 # Mercurial
56
56
57 import contextlib
57 import contextlib
58 import functools
58 import functools
59 import gc
59 import gc
60 import os
60 import os
61 import random
61 import random
62 import shutil
62 import shutil
63 import struct
63 import struct
64 import sys
64 import sys
65 import tempfile
65 import tempfile
66 import threading
66 import threading
67 import time
67 import time
68
68
69 import mercurial.revlog
69 import mercurial.revlog
70 from mercurial import (
70 from mercurial import (
71 changegroup,
71 changegroup,
72 cmdutil,
72 cmdutil,
73 commands,
73 commands,
74 copies,
74 copies,
75 error,
75 error,
76 extensions,
76 extensions,
77 hg,
77 hg,
78 mdiff,
78 mdiff,
79 merge,
79 merge,
80 util,
80 util,
81 )
81 )
82
82
83 # for "historical portability":
83 # for "historical portability":
84 # try to import modules separately (in dict order), and ignore
84 # try to import modules separately (in dict order), and ignore
85 # failure, because these aren't available with early Mercurial
85 # failure, because these aren't available with early Mercurial
86 try:
86 try:
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 except ImportError:
88 except ImportError:
89 pass
89 pass
90 try:
90 try:
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 except ImportError:
92 except ImportError:
93 pass
93 pass
94 try:
94 try:
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96
96
97 dir(registrar) # forcibly load it
97 dir(registrar) # forcibly load it
98 except ImportError:
98 except ImportError:
99 registrar = None
99 registrar = None
100 try:
100 try:
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 except ImportError:
102 except ImportError:
103 pass
103 pass
104 try:
104 try:
105 from mercurial.utils import repoviewutil # since 5.0
105 from mercurial.utils import repoviewutil # since 5.0
106 except ImportError:
106 except ImportError:
107 repoviewutil = None
107 repoviewutil = None
108 try:
108 try:
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 except ImportError:
110 except ImportError:
111 pass
111 pass
112 try:
112 try:
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 except ImportError:
114 except ImportError:
115 pass
115 pass
116
116
117 try:
117 try:
118 from mercurial import profiling
118 from mercurial import profiling
119 except ImportError:
119 except ImportError:
120 profiling = None
120 profiling = None
121
121
122 try:
122 try:
123 from mercurial.revlogutils import constants as revlog_constants
123 from mercurial.revlogutils import constants as revlog_constants
124
124
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126
126
127 def revlog(opener, *args, **kwargs):
127 def revlog(opener, *args, **kwargs):
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129
129
130
130
131 except (ImportError, AttributeError):
131 except (ImportError, AttributeError):
132 perf_rl_kind = None
132 perf_rl_kind = None
133
133
134 def revlog(opener, *args, **kwargs):
134 def revlog(opener, *args, **kwargs):
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136
136
137
137
def identity(a):
    """Return *a* unchanged; fallback when a pycompat converter is missing."""
    return a
140
140
141
141
142 try:
142 try:
143 from mercurial import pycompat
143 from mercurial import pycompat
144
144
145 getargspec = pycompat.getargspec # added to module after 4.5
145 getargspec = pycompat.getargspec # added to module after 4.5
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 if pycompat.ispy3:
151 if pycompat.ispy3:
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 else:
153 else:
154 _maxint = sys.maxint
154 _maxint = sys.maxint
155 except (NameError, ImportError, AttributeError):
155 except (NameError, ImportError, AttributeError):
156 import inspect
156 import inspect
157
157
158 getargspec = inspect.getargspec
158 getargspec = inspect.getargspec
159 _byteskwargs = identity
159 _byteskwargs = identity
160 _bytestr = str
160 _bytestr = str
161 fsencode = identity # no py3 support
161 fsencode = identity # no py3 support
162 _maxint = sys.maxint # no py3 support
162 _maxint = sys.maxint # no py3 support
163 _sysstr = lambda x: x # no py3 support
163 _sysstr = lambda x: x # no py3 support
164 _xrange = xrange
164 _xrange = xrange
165
165
166 try:
166 try:
167 # 4.7+
167 # 4.7+
168 queue = pycompat.queue.Queue
168 queue = pycompat.queue.Queue
169 except (NameError, AttributeError, ImportError):
169 except (NameError, AttributeError, ImportError):
170 # <4.7.
170 # <4.7.
171 try:
171 try:
172 queue = pycompat.queue
172 queue = pycompat.queue
173 except (NameError, AttributeError, ImportError):
173 except (NameError, AttributeError, ImportError):
174 import Queue as queue
174 import Queue as queue
175
175
176 try:
176 try:
177 from mercurial import logcmdutil
177 from mercurial import logcmdutil
178
178
179 makelogtemplater = logcmdutil.maketemplater
179 makelogtemplater = logcmdutil.maketemplater
180 except (AttributeError, ImportError):
180 except (AttributeError, ImportError):
181 try:
181 try:
182 makelogtemplater = cmdutil.makelogtemplater
182 makelogtemplater = cmdutil.makelogtemplater
183 except (AttributeError, ImportError):
183 except (AttributeError, ImportError):
184 makelogtemplater = None
184 makelogtemplater = None
185
185
186 # for "historical portability":
186 # for "historical portability":
187 # define util.safehasattr forcibly, because util.safehasattr has been
187 # define util.safehasattr forcibly, because util.safehasattr has been
188 # available since 1.9.3 (or 94b200a11cf7)
188 # available since 1.9.3 (or 94b200a11cf7)
189 _undefined = object()
189 _undefined = object()
190
190
191
191
def safehasattr(thing, attr):
    """Portable hasattr(): True if `thing` has the attribute named by `attr`.

    `attr` is given as bytes and converted with _sysstr before lookup;
    a private sentinel distinguishes "missing" from any real value.
    """
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel
194
194
195
195
196 setattr(util, 'safehasattr', safehasattr)
196 setattr(util, 'safehasattr', safehasattr)
197
197
198 # for "historical portability":
198 # for "historical portability":
199 # define util.timer forcibly, because util.timer has been available
199 # define util.timer forcibly, because util.timer has been available
200 # since ae5d60bb70c9
200 # since ae5d60bb70c9
201 if safehasattr(time, 'perf_counter'):
201 if safehasattr(time, 'perf_counter'):
202 util.timer = time.perf_counter
202 util.timer = time.perf_counter
203 elif os.name == b'nt':
203 elif os.name == b'nt':
204 util.timer = time.clock
204 util.timer = time.clock
205 else:
205 else:
206 util.timer = time.time
206 util.timer = time.time
207
207
208 # for "historical portability":
208 # for "historical portability":
209 # use locally defined empty option list, if formatteropts isn't
209 # use locally defined empty option list, if formatteropts isn't
210 # available, because commands.formatteropts has been available since
210 # available, because commands.formatteropts has been available since
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 # available since 2.2 (or ae5f92e154d3)
212 # available since 2.2 (or ae5f92e154d3)
213 formatteropts = getattr(
213 formatteropts = getattr(
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 )
215 )
216
216
217 # for "historical portability":
217 # for "historical portability":
218 # use locally defined option list, if debugrevlogopts isn't available,
218 # use locally defined option list, if debugrevlogopts isn't available,
219 # because commands.debugrevlogopts has been available since 3.7 (or
219 # because commands.debugrevlogopts has been available since 3.7 (or
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 # since 1.9 (or a79fea6b3e77).
221 # since 1.9 (or a79fea6b3e77).
222 revlogopts = getattr(
222 revlogopts = getattr(
223 cmdutil,
223 cmdutil,
224 "debugrevlogopts",
224 "debugrevlogopts",
225 getattr(
225 getattr(
226 commands,
226 commands,
227 "debugrevlogopts",
227 "debugrevlogopts",
228 [
228 [
229 (b'c', b'changelog', False, b'open changelog'),
229 (b'c', b'changelog', False, b'open changelog'),
230 (b'm', b'manifest', False, b'open manifest'),
230 (b'm', b'manifest', False, b'open manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
231 (b'', b'dir', False, b'open directory manifest'),
232 ],
232 ],
233 ),
233 ),
234 )
234 )
235
235
236 cmdtable = {}
236 cmdtable = {}
237
237
238 # for "historical portability":
238 # for "historical portability":
239 # define parsealiases locally, because cmdutil.parsealiases has been
239 # define parsealiases locally, because cmdutil.parsealiases has been
240 # available since 1.5 (or 6252852b4332)
240 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its list of names."""
    return cmd.split(b"|")
243
243
244
244
if safehasattr(registrar, 'command'):
    # modern hg: registrar.command handles everything, including norepo
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                entry = (func, list(options), synopsis)
            else:
                entry = (func, list(options))
            cmdtable[name] = entry
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
276
276
277
277
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)

    # (name, experimental-flag) for every perf.* knob this extension reads;
    # registration order matters only in that it matches the historical one.
    _perf_config_items = [
        (b'presleep', True),
        (b'stub', True),
        (b'parentscount', True),
        (b'all-timing', True),
        (b'pre-run', False),
        (b'profile-benchmark', False),
        (b'run-limits', True),
    ]
    for _name, _experimental in _perf_config_items:
        if _experimental:
            configitem(
                b'perf',
                _name,
                default=mercurial.configitems.dynamicdefault,
                experimental=True,
            )
        else:
            configitem(
                b'perf',
                _name,
                default=mercurial.configitems.dynamicdefault,
            )
except (ImportError, AttributeError):
    # registrar/configitems not available in this hg version: nothing to do
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # configitem() does not accept the `experimental` keyword there, so
    # re-register every knob without it.
    for _name, _experimental in _perf_config_items:
        configitem(
            b'perf',
            _name,
            default=mercurial.configitems.dynamicdefault,
        )
364
364
365
365
def getlen(ui):
    """Return the length function benchmarks should use.

    With perf.stub set, every collection pretends to have exactly one
    element so stubbed benchmark runs stay fast.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda seq: 1) if stubbed else len
370
370
371
371
class noop:
    """Do-nothing context manager (stand-in for an absent profiler)."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None
380
380
381
381
382 NOOPCTX = noop()
382 NOOPCTX = noop()
383
383
384
384
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        fout_setter = safeattrsetter(ui, b'fout', ignoremissing=True)
        if fout_setter:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            fout_setter.set(ui.ferr)

    # get a formatter
    formatter_factory = getattr(ui, 'formatter', None)
    if formatter_factory:
        fm = formatter_factory(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                self.hexfunc = node.hex if ui.debugflag else node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<minruns>"; malformed entries warn + skip
    limits = []
    for entry in ui.configlist(b"perf", b"run-limits", []):
        pieces = entry.split(b'-', 1)
        if len(pieces) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % entry))
            continue
        try:
            time_limit = float(_sysstr(pieces[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), entry)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(pieces[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), entry)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None and ui.configbool(
        b"perf", b"profile-benchmark", False
    ):
        profiler = profiling.profile(ui)

    timer = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=getint(ui, b"perf", b"pre-run", 0),
        profiler=profiler,
    )
    return timer, fm
507
507
508
508
def stub_timer(fm, func, setup=None, title=None):
    """One-shot replacement for _timer when perf.stub is set.

    Runs `setup` (if provided) then `func` exactly once, producing no
    timing output; keeps test runs of perf commands fast.
    """
    if setup is not None:
        setup()
    func()
513
513
514
514
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) timing tuple.

    The tuple measures the body of the `with` statement; the inner pair
    of measurements (util.timer) is taken closest to the body so os.times
    overhead is not charged to the wall clock.
    """
    measurement = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurement
    wall_after = util.timer()
    os_after = os.times()
    measurement.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
525
525
526
526
527 # list of stop condition (elapsed time, minimal run count)
527 # list of stop condition (elapsed time, minimal run count)
528 DEFAULTLIMITS = (
528 DEFAULTLIMITS = (
529 (3.0, 100),
529 (3.0, 100),
530 (10.0, 3),
530 (10.0, 3),
531 )
531 )
532
532
533
533
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly time `func` and report the results through `fm`.

    `setup` (if given) runs before every call, warm-up calls included.
    `prerun` warm-up iterations are executed unmeasured; `func` is then
    timed until one of the (elapsed-seconds, min-run-count) pairs in
    `limits` is satisfied.  Only the first measured iteration runs under
    `profiler` (if any).
    """
    gc.collect()
    results = []
    # the clock starts before warm-up: warm-up time counts toward limits
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for _ in range(prerun):
        if setup is not None:
            setup()
        func()
    done = False
    while not done:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as timing:
                r = func()
        profiler = NOOPCTX  # only the first measured iteration is profiled
        count += 1
        results.append(timing[0])
        # look for a stop condition
        elapsed = util.timer() - begin
        for threshold, mincount in limits:
            if elapsed >= threshold and count >= mincount:
                done = True
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
573
573
574
574
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter `fm`.

    `timings` is a list of (wall, user, sys) tuples and is sorted in
    place.  The best run is always reported; with `displayall`, max,
    average and median lines are added.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # every role except "best" prefixes its field names with "<role>."
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        display(b'median', timings[len(timings) // 2])
608
608
609
609
610 # utilities for historical portability
610 # utilities for historical portability
611
611
612
612
def getint(ui, section, name, default):
    """Read config option ``section.name`` as an integer, or `default`.

    for "historical portability":
    ui.configint has been available since 1.9 (or fa2b596db182), so the
    conversion is done by hand here.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
625
625
626
626
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure 'obj' has attribute 'name' and return a set/restore helper.

    This function aborts when 'obj' lacks the attribute at runtime, so
    that removal of an attribute a benchmark relies on is noticed instead
    of silently producing wrong measurements.

    The returned object offers (1) set(newvalue) to assign a new value and
    (2) restore() to put the original value back.

    If 'ignoremissing' is true, a missing attribute yields None instead of
    an abort, which is useful for attributes that only exist in some
    Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        )
        raise error.Abort(msg % (name, obj))

    # remember the current value so restore() can undo any set()
    saved = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), saved)

    return attrutil()
663
663
664
664
665 # utilities to examine each internal API changes
665 # utilities to examine each internal API changes
666
666
667
667
def getbranchmapsubsettable():
    """Locate the branch-map 'subsettable' across Mercurial versions.

    for "historical portability", subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for mod in (branchmap, repoview, repoviewutil):
        table = getattr(mod, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686
686
687
687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    for "historical portability":
    repo.svfs has been available since 2.3 (or 7034365089bf); older
    repositories expose the same thing as 'sopener'.
    """
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
697
697
698
698
def getvfs(repo):
    """Return appropriate object to access files under .hg

    for "historical portability":
    repo.vfs has been available since 2.3 (or 7034365089bf); older
    repositories expose the same thing as 'opener'.
    """
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
708
708
709
709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # setattr(repo, '_tagscache', None) or similar would be the wrong
        # way to clear the cache here: existing code paths expect
        # _tagscache to be a structured object, so the cached property has
        # to be dropped from the instance dict instead.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # older APIs simply reset a cache attribute to None:
    # - repo._tags since 1.4 (or 5614a628d173)
    # - repo.tagscache since 0.6 (or d7df759d0e97)
    for attrname in (b'_tags', b'tagscache'):
        setter = safeattrsetter(repo, attrname, ignoremissing=True)
        if setter:
            return lambda setter=setter: setter.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738
738
739
739
740 # utilities to clear cache
740 # utilities to clear cache
741
741
742
742
def clearfilecache(obj, attrname):
    """Invalidate the filecache entry `attrname` on `obj`.

    Repo-like objects are unfiltered first (the cache lives on the
    unfiltered view); the cached value is then removed both from the
    instance dict and from the `_filecache` bookkeeping dict.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750
750
751
751
def clearchangelog(repo):
    """Force the changelog of `repo` to be reloaded on next access."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # also drop the filtered view's private changelog cache
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
757
757
758
758
759 # perf commands
759 # perf commands
760
760
761
761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for the given patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def d():
        walked = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        # materialize the walk so the whole traversal is timed
        return len(list(walked))

    timer(d)
    fm.end()
775
775
776
776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file `f` at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def d():
        return len(fctx.annotate(True))

    timer(d)
    fm.end()
784
784
785
785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']
    if not opts[b'dirstate']:
        timer(lambda: sum(map(len, repo.status(unknown=unknown))))
    else:
        # time the low-level dirstate.status call directly
        dirstate = repo.dirstate
        matcher = scmutil.matchall(repo)

        def status_dirstate():
            st = dirstate.status(
                matcher,
                subrepos=[],
                ignored=False,
                clean=False,
                unknown=unknown,
            )
            # force evaluation of the lazily computed status fields
            sum(map(bool, st))

        timer(status_dirstate)
    fm.end()
822
822
823
823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Assigned before the try block: if the assignment lived inside and a
    # statement before it raised, the finally clause would hit a NameError
    # on `oldquiet` and mask the original exception.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry-run so the benchmark never mutates the repository
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # newer addremove API (5.1+) takes an explicit uipathfn
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841
841
842
842
def clearcaches(cl):
    """Reset the lookup caches of changelog `cl`.

    behave somewhat consistently across internal API changes
    """
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
        return
    if util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853
853
854
854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start each run from a cold cache
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
870
870
871
871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark the computation of the repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        # always drop the tags cache so each run recomputes it
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
896
896
897
897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the ancestor iterator; iteration is what we time
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
910
910
911
911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestry membership tests for the revisions in `revset`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            # the membership test itself is what we time
            rev in ancestors

    timer(d)
    fm.end()
926
926
927
927
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path

        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        # Mercurial without urlutil: fall back to the legacy helper
        path = ui.expandpath(path)

    def setup():
        # a fresh peer for every run; discovery state is not reusable
        repos[1] = hg.peer(ui, opts, path)

    def run():
        setdiscovery.findcommonheads(ui, *repos)

    timer(run, setup=setup)
    fm.end()
949
949
950
950
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
975
975
976
976
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    from mercurial import bundlecaches
    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed wording: was "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # heads of the selected set, and heads of the ancestors outside it,
    # define what the bundle has to contain
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = bundlecaches.parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params[b"cg.version"]
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    # only uncompressed bundles are supported for now (see docstring)
    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull so only bundle generation is timed, not disk
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1067
1067
1068
1068
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # factory helpers: each returns a zero-argument closure suitable for
    # timer(); each closure re-opens and re-parses the bundle so every run
    # starts from scratch

    def makebench(fn):
        # run fn() against a freshly parsed bundle object
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the parsed bundle stream in `size`-byte chunks to exhaustion
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle parsing at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # read each bundle2 part in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines are benchmarked for every bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # parse the bundle once only to detect its type and pick the
    # type-specific benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    # one timing report per benchmark, each with its own formatter
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1193
1193
1194
1194
1195 @command(
1195 @command(
1196 b'perf::changegroupchangelog|perfchangegroupchangelog',
1196 b'perf::changegroupchangelog|perfchangegroupchangelog',
1197 formatteropts
1197 formatteropts
1198 + [
1198 + [
1199 (b'', b'cgversion', b'02', b'changegroup version'),
1199 (b'', b'cgversion', b'02', b'changegroup version'),
1200 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1200 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1201 ],
1201 ],
1202 )
1202 )
1203 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1203 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1204 """Benchmark producing a changelog group for a changegroup.
1204 """Benchmark producing a changelog group for a changegroup.
1205
1205
1206 This measures the time spent processing the changelog during a
1206 This measures the time spent processing the changelog during a
1207 bundle operation. This occurs during `hg bundle` and on a server
1207 bundle operation. This occurs during `hg bundle` and on a server
1208 processing a `getbundle` wire protocol request (handles clones
1208 processing a `getbundle` wire protocol request (handles clones
1209 and pull requests).
1209 and pull requests).
1210
1210
1211 By default, all revisions are added to the changegroup.
1211 By default, all revisions are added to the changegroup.
1212 """
1212 """
1213 opts = _byteskwargs(opts)
1213 opts = _byteskwargs(opts)
1214 cl = repo.changelog
1214 cl = repo.changelog
1215 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1215 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1216 bundler = changegroup.getbundler(cgversion, repo)
1216 bundler = changegroup.getbundler(cgversion, repo)
1217
1217
1218 def d():
1218 def d():
1219 state, chunks = bundler._generatechangelog(cl, nodes)
1219 state, chunks = bundler._generatechangelog(cl, nodes)
1220 for chunk in chunks:
1220 for chunk in chunks:
1221 pass
1221 pass
1222
1222
1223 timer, fm = gettimer(ui, opts)
1223 timer, fm = gettimer(ui, opts)
1224
1224
1225 # Terminal printing can interfere with timing. So disable it.
1225 # Terminal printing can interfere with timing. So disable it.
1226 with ui.configoverride({(b'progress', b'disable'): True}):
1226 with ui.configoverride({(b'progress', b'disable'): True}):
1227 timer(d)
1227 timer(d)
1228
1228
1229 fm.end()
1229 fm.end()
1230
1230
1231
1231
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark 'dirstate.hasdir' calls that must rebuild the dirs cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before any timing happens
    b'a' in dirstate

    def runone():
        dirstate.hasdir(b'a')
        try:
            # drop the cached directory map so the next run recomputes it
            del dirstate._map._dirs
        except AttributeError:
            # some dirstate map implementations have no such cache
            pass

    timer(runone)
    fm.end()
1248
1248
1249
1249
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default, benchmark the time necessary to load a dirstate from
    scratch, up to the point where a "contains" request can be answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate is fully loaded before measuring anything
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def runone():
            for fname in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file paths that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def runone():
            for fname in allfiles:
                fname in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def runone():
            b"a" in repo.dirstate

    timer(runone, setup=setup)
    fm.end()
1312
1312
1313
1313
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate itself; only the dirs cache should be cold per run
    repo.dirstate.hasdir(b"a")

    def setup():
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            # some dirstate map implementations have no such cache
            pass

    def runone():
        repo.dirstate.hasdir(b"a")

    timer(runone, setup=setup)
    fm.end()
1332
1332
1333
1333
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime everything except the cache under test
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def runone():
        dirstate._map.filefoldmap.get(b'a')

    timer(runone, setup=setup)
    fm.end()
1353
1353
1354
1354
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime everything except the caches under test
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        try:
            # the dirs structure feeds dirfoldmap; drop it as well
            del dirstate._map._dirs
        except AttributeError:
            pass

    def runone():
        dirstate._map.dirfoldmap.get(b'a')

    timer(runone, setup=setup)
    fm.end()
1378
1378
1379
1379
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing
    b"a" in ds

    def setup():
        # mark the dirstate dirty so every run performs a real write
        ds._dirty = True

    def runone():
        ds.write(repo.currenttransaction())

    timer(runone, setup=setup)
    fm.end()
1396
1396
1397
1397
def _getmergerevs(repo, opts):
    """Parse command arguments and return the revisions involved in a merge.

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        localrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[localrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark,
        # so prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        baserev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[baserev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1419
1419
1420
1420
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between the selected revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def runone():
        # acceptremote is True because we don't want prompts in the middle
        # of our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(runone)
    fm.end()
1452
1452
1453
1453
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def runone():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(runone)
    fm.end()
1476
1476
1477
1477
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def runone():
        copies.pathcopies(ctx1, ctx2)

    timer(runone)
    fm.end()
1491
1491
1492
1492
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def runone():
        phases = _phases
        if full:
            # also drop the cached object so file reading is included
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(runone)
    fm.end()
1517
1517
1518
1518
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # older Mercurial versions expose the nodemap instead
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE(review): `iteritems` is Python 2 era API — presumably kept for
    # compatibility with the old hg versions this extension supports; confirm.
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def runone():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(runone)
    fm.end()
1577
1577
1578
1578
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # interpret `rev` as a changeset and use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        mnode = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full hexadecimal node id was given
            mnode = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    mnode = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial versions expose the revlog directly
                    mnode = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def runone():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[mnode].read()

    timer(runone)
    fm.end()
1622
1622
1623
1623
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def runone():
        repo.changelog.read(node)

    timer(runone)
    fm.end()
1636
1636
1637
1637
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def dropcaches():
        # invalidate the dirstate and its cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def loadignore():
        dirstate._ignore

    timer(loadignore, setup=dropcaches, title=b"load")
    fm.end()
1654
1654
1655
1655
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than
    for `--rev 0`. The number of looked up revisions and their order can
    also matter.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # grab the filecache's wrapped function directly; this keeps the
    # filecache machinery itself out of the measurement
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def runone():
        cl = makecl(unfi)
        for node in nodes:
            cl.rev(node)

    timer(runone, setup=setup)
    fm.end()
1718
1718
1719
1719
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revisions we
    look up can vary. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # grab the filecache's wrapped function directly; this keeps the
    # filecache machinery itself out of the measurement
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a one-element list to share the lookup callable between closures
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            # older Mercurial versions expose the nodemap instead
            nodeget[0] = cl.nodemap.get

    def runone():
        get = nodeget[0]
        for node in nodes:
            get(node)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        runone()  # prewarm the data structure
    timer(runone, setup=setup)
    fm.end()
1790
1790
1791
1791
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the time needed to start hg and print its version"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def launch():
        # re-run the currently executing hg binary with a quiet `version`
        # call, discarding its output and neutralizing HGRCPATH
        if os.name == 'nt':
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])
        else:
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )

    timer(launch)
    fm.end()
1808
1808
1809
1809
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    # look nodes up on the unfiltered repository, bypassing view filtering
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(rev) for rev in _xrange(count)]

    def fetchparents():
        for node in nodes:
            repo.changelog.parents(node)

    timer(fetchparents)
    fm.end()
1835
1835
1836
1836
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the list of files touched by one changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def getfiles():
        # go through the full context layer, not the raw changelog
        len(repo[rev].files())

    timer(getfiles)
    fm.end()
1848
1848
1849
1849
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list of one changeset from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def readfiles():
        # entry index 3 of a raw changelog record is the file list
        len(cl.read(rev)[3])

    timer(readfiles)
    fm.end()
1862
1862
1863
1863
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier through repo.lookup"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def lookup():
        return len(repo.lookup(rev))

    timer(lookup)
    fm.end()
1870
1870
1871
1871
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a pseudo-random stream of edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run replays the identical edit script
    random.seed(0)
    randint = random.randint
    nlines = 0
    script = []
    for rev in _xrange(edits):
        # pick a hunk [a1, a2) of the current file and replace it with
        # [b1, b2) lines; keep track of the resulting file length
        a1 = randint(0, nlines)
        a2 = randint(a1, min(nlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        nlines += (b2 - b1) - (a2 - a1)
        script.append((rev, a1, a2, b1, b2))

    def replay():
        ll = linelog.linelog()
        for args in script:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(replay)
    fm.end()
1909
1909
1910
1910
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(scmutil.revrange(repo, specs))

    timer(resolve)
    fm.end()
1918
1918
1919
1919
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node to its revision number on a cold revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24
    node = scmutil.revsingle(repo, rev).node()

    # modern revlogs are constructed from a radix; fall back to the older
    # indexfile-based signature on TypeError
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def lookup():
        cl.rev(node)
        # drop the caches so every iteration measures a cold lookup
        clearcaches(cl)

    timer(lookup)
    fm.end()
1940
1940
1941
1941
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, with its output captured and discarded"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer the ui so log output does not pollute the benchmark report
    ui.pushbuffer()

    def runlog():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(runlog)
    ui.popbuffer()
    fm.end()
1959
1959
1960
1960
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # reading the branch forces the changelog entry itself to be
            # loaded, not just the index
            repo[rev].branch()

    timer(moonwalk)
    fm.end()
1977
1977
1978
1978
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui whose output goes to the null device, so only
    # templating (not terminal I/O) is measured
    sinkui = ui.copy()
    sinkui.fout = open(os.devnull, 'wb')
    sinkui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(sinkui, repo, testedtemplate)

    def render():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
2021
2021
2022
2022
def _displaystats(ui, opts, entries, data):
    """Print percentile statistics for each measured data series.

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each key
    to a list of tuples whose first element is the measured value (the
    remaining elements identify the measurement and are ignored here).
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # percentile indices must be derived from the number of samples in
        # *this* series; the previous code used len(data) — the number of
        # series — which collapsed every percentile onto the first samples
        nbvalues = len(values)
        values.sort()
        if not nbvalues:
            # no sample collected for this series; avoid indexing an empty
            # list below and still report the (empty) series
            fm.plain('### %s (%d items)\n' % (title, 0))
            continue
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2067
2067
2068
2068
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    # NOTE(review): the mutable default `revs=[]` is never mutated (it is
    # only reassigned), but `revs=None` would be the safer idiom
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, %-format for the matching key of `data`) pairs; the
    # format strings are keyed on the entries built in the loop below
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename/time columns are never filled in,
        # so drop them from the table
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw samples accumulated for the final percentile report; each
        # entry is a (value, identifying hashes...) tuple
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge revisions are relevant for merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        # each common ancestor head yields one (base, p1, p2) triplet
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                # time rename detection from the base to each parent
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2250
2250
2251
2251
2252 @command(
2252 @command(
2253 b'perf::helper-pathcopies|perfhelper-pathcopies',
2253 b'perf::helper-pathcopies|perfhelper-pathcopies',
2254 formatteropts
2254 formatteropts
2255 + [
2255 + [
2256 (b'r', b'revs', [], b'restrict search to these revisions'),
2256 (b'r', b'revs', [], b'restrict search to these revisions'),
2257 (b'', b'timing', False, b'provides extra data (costly)'),
2257 (b'', b'timing', False, b'provides extra data (costly)'),
2258 (b'', b'stats', False, b'provides statistic about the measured data'),
2258 (b'', b'stats', False, b'provides statistic about the measured data'),
2259 ],
2259 ],
2260 )
2260 )
2261 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2261 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2262 """find statistic about potential parameters for the `perftracecopies`
2262 """find statistic about potential parameters for the `perftracecopies`
2263
2263
2264 This command find source-destination pair relevant for copytracing testing.
2264 This command find source-destination pair relevant for copytracing testing.
2265 It report value for some of the parameters that impact copy tracing time.
2265 It report value for some of the parameters that impact copy tracing time.
2266
2266
2267 If `--timing` is set, rename detection is run and the associated timing
2267 If `--timing` is set, rename detection is run and the associated timing
2268 will be reported. The extra details comes at the cost of a slower command
2268 will be reported. The extra details comes at the cost of a slower command
2269 execution.
2269 execution.
2270
2270
2271 Since the rename detection is only run once, other factors might easily
2271 Since the rename detection is only run once, other factors might easily
2272 affect the precision of the timing. However it should give a good
2272 affect the precision of the timing. However it should give a good
2273 approximation of which revision pairs are very costly.
2273 approximation of which revision pairs are very costly.
2274 """
2274 """
2275 opts = _byteskwargs(opts)
2275 opts = _byteskwargs(opts)
2276 fm = ui.formatter(b'perf', opts)
2276 fm = ui.formatter(b'perf', opts)
2277 dotiming = opts[b'timing']
2277 dotiming = opts[b'timing']
2278 dostats = opts[b'stats']
2278 dostats = opts[b'stats']
2279
2279
2280 if dotiming:
2280 if dotiming:
2281 header = '%12s %12s %12s %12s %12s %12s\n'
2281 header = '%12s %12s %12s %12s %12s %12s\n'
2282 output = (
2282 output = (
2283 "%(source)12s %(destination)12s "
2283 "%(source)12s %(destination)12s "
2284 "%(nbrevs)12d %(nbmissingfiles)12d "
2284 "%(nbrevs)12d %(nbmissingfiles)12d "
2285 "%(nbrenamedfiles)12d %(time)18.5f\n"
2285 "%(nbrenamedfiles)12d %(time)18.5f\n"
2286 )
2286 )
2287 header_names = (
2287 header_names = (
2288 "source",
2288 "source",
2289 "destination",
2289 "destination",
2290 "nb-revs",
2290 "nb-revs",
2291 "nb-files",
2291 "nb-files",
2292 "nb-renames",
2292 "nb-renames",
2293 "time",
2293 "time",
2294 )
2294 )
2295 fm.plain(header % header_names)
2295 fm.plain(header % header_names)
2296 else:
2296 else:
2297 header = '%12s %12s %12s %12s\n'
2297 header = '%12s %12s %12s %12s\n'
2298 output = (
2298 output = (
2299 "%(source)12s %(destination)12s "
2299 "%(source)12s %(destination)12s "
2300 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2300 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2301 )
2301 )
2302 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2302 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2303
2303
2304 if not revs:
2304 if not revs:
2305 revs = ['all()']
2305 revs = ['all()']
2306 revs = scmutil.revrange(repo, revs)
2306 revs = scmutil.revrange(repo, revs)
2307
2307
2308 if dostats:
2308 if dostats:
2309 alldata = {
2309 alldata = {
2310 'nbrevs': [],
2310 'nbrevs': [],
2311 'nbmissingfiles': [],
2311 'nbmissingfiles': [],
2312 }
2312 }
2313 if dotiming:
2313 if dotiming:
2314 alldata['nbrenames'] = []
2314 alldata['nbrenames'] = []
2315 alldata['time'] = []
2315 alldata['time'] = []
2316
2316
2317 roi = repo.revs('merge() and %ld', revs)
2317 roi = repo.revs('merge() and %ld', revs)
2318 for r in roi:
2318 for r in roi:
2319 ctx = repo[r]
2319 ctx = repo[r]
2320 p1 = ctx.p1().rev()
2320 p1 = ctx.p1().rev()
2321 p2 = ctx.p2().rev()
2321 p2 = ctx.p2().rev()
2322 bases = repo.changelog._commonancestorsheads(p1, p2)
2322 bases = repo.changelog._commonancestorsheads(p1, p2)
2323 for p in (p1, p2):
2323 for p in (p1, p2):
2324 for b in bases:
2324 for b in bases:
2325 base = repo[b]
2325 base = repo[b]
2326 parent = repo[p]
2326 parent = repo[p]
2327 missing = copies._computeforwardmissing(base, parent)
2327 missing = copies._computeforwardmissing(base, parent)
2328 if not missing:
2328 if not missing:
2329 continue
2329 continue
2330 data = {
2330 data = {
2331 b'source': base.hex(),
2331 b'source': base.hex(),
2332 b'destination': parent.hex(),
2332 b'destination': parent.hex(),
2333 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2333 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2334 b'nbmissingfiles': len(missing),
2334 b'nbmissingfiles': len(missing),
2335 }
2335 }
2336 if dostats:
2336 if dostats:
2337 alldata['nbrevs'].append(
2337 alldata['nbrevs'].append(
2338 (
2338 (
2339 data['nbrevs'],
2339 data['nbrevs'],
2340 base.hex(),
2340 base.hex(),
2341 parent.hex(),
2341 parent.hex(),
2342 )
2342 )
2343 )
2343 )
2344 alldata['nbmissingfiles'].append(
2344 alldata['nbmissingfiles'].append(
2345 (
2345 (
2346 data['nbmissingfiles'],
2346 data['nbmissingfiles'],
2347 base.hex(),
2347 base.hex(),
2348 parent.hex(),
2348 parent.hex(),
2349 )
2349 )
2350 )
2350 )
2351 if dotiming:
2351 if dotiming:
2352 begin = util.timer()
2352 begin = util.timer()
2353 renames = copies.pathcopies(base, parent)
2353 renames = copies.pathcopies(base, parent)
2354 end = util.timer()
2354 end = util.timer()
2355 # not very stable timing since we did only one run
2355 # not very stable timing since we did only one run
2356 data['time'] = end - begin
2356 data['time'] = end - begin
2357 data['nbrenamedfiles'] = len(renames)
2357 data['nbrenamedfiles'] = len(renames)
2358 if dostats:
2358 if dostats:
2359 alldata['time'].append(
2359 alldata['time'].append(
2360 (
2360 (
2361 data['time'],
2361 data['time'],
2362 base.hex(),
2362 base.hex(),
2363 parent.hex(),
2363 parent.hex(),
2364 )
2364 )
2365 )
2365 )
2366 alldata['nbrenames'].append(
2366 alldata['nbrenames'].append(
2367 (
2367 (
2368 data['nbrenamedfiles'],
2368 data['nbrenamedfiles'],
2369 base.hex(),
2369 base.hex(),
2370 parent.hex(),
2370 parent.hex(),
2371 )
2371 )
2372 )
2372 )
2373 fm.startitem()
2373 fm.startitem()
2374 fm.data(**data)
2374 fm.data(**data)
2375 out = data.copy()
2375 out = data.copy()
2376 out['source'] = fm.hexfunc(base.node())
2376 out['source'] = fm.hexfunc(base.node())
2377 out['destination'] = fm.hexfunc(parent.node())
2377 out['destination'] = fm.hexfunc(parent.node())
2378 fm.plain(output % out)
2378 fm.plain(output % out)
2379
2379
2380 fm.end()
2380 fm.end()
2381 if dostats:
2381 if dostats:
2382 entries = [
2382 entries = [
2383 ('nbrevs', 'number of revision covered'),
2383 ('nbrevs', 'number of revision covered'),
2384 ('nbmissingfiles', 'number of missing files at head'),
2384 ('nbmissingfiles', 'number of missing files at head'),
2385 ]
2385 ]
2386 if dotiming:
2386 if dotiming:
2387 entries.append(('nbrenames', 'renamed files'))
2387 entries.append(('nbrenames', 'renamed files'))
2388 entries.append(('time', 'time'))
2388 entries.append(('time', 'time'))
2389 _displaystats(ui, opts, entries, alldata)
2389 _displaystats(ui, opts, entries, alldata)
2390
2390
2391
2391
2392 @command(b'perf::cca|perfcca', formatteropts)
2392 @command(b'perf::cca|perfcca', formatteropts)
2393 def perfcca(ui, repo, **opts):
2393 def perfcca(ui, repo, **opts):
2394 opts = _byteskwargs(opts)
2394 opts = _byteskwargs(opts)
2395 timer, fm = gettimer(ui, opts)
2395 timer, fm = gettimer(ui, opts)
2396 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2396 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2397 fm.end()
2397 fm.end()
2398
2398
2399
2399
2400 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2400 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2401 def perffncacheload(ui, repo, **opts):
2401 def perffncacheload(ui, repo, **opts):
2402 opts = _byteskwargs(opts)
2402 opts = _byteskwargs(opts)
2403 timer, fm = gettimer(ui, opts)
2403 timer, fm = gettimer(ui, opts)
2404 s = repo.store
2404 s = repo.store
2405
2405
2406 def d():
2406 def d():
2407 s.fncache._load()
2407 s.fncache._load()
2408
2408
2409 timer(d)
2409 timer(d)
2410 fm.end()
2410 fm.end()
2411
2411
2412
2412
2413 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2413 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2414 def perffncachewrite(ui, repo, **opts):
2414 def perffncachewrite(ui, repo, **opts):
2415 opts = _byteskwargs(opts)
2415 opts = _byteskwargs(opts)
2416 timer, fm = gettimer(ui, opts)
2416 timer, fm = gettimer(ui, opts)
2417 s = repo.store
2417 s = repo.store
2418 lock = repo.lock()
2418 lock = repo.lock()
2419 s.fncache._load()
2419 s.fncache._load()
2420 tr = repo.transaction(b'perffncachewrite')
2420 tr = repo.transaction(b'perffncachewrite')
2421 tr.addbackup(b'fncache')
2421 tr.addbackup(b'fncache')
2422
2422
2423 def d():
2423 def d():
2424 s.fncache._dirty = True
2424 s.fncache._dirty = True
2425 s.fncache.write(tr)
2425 s.fncache.write(tr)
2426
2426
2427 timer(d)
2427 timer(d)
2428 tr.close()
2428 tr.close()
2429 lock.release()
2429 lock.release()
2430 fm.end()
2430 fm.end()
2431
2431
2432
2432
2433 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2433 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2434 def perffncacheencode(ui, repo, **opts):
2434 def perffncacheencode(ui, repo, **opts):
2435 opts = _byteskwargs(opts)
2435 opts = _byteskwargs(opts)
2436 timer, fm = gettimer(ui, opts)
2436 timer, fm = gettimer(ui, opts)
2437 s = repo.store
2437 s = repo.store
2438 s.fncache._load()
2438 s.fncache._load()
2439
2439
2440 def d():
2440 def d():
2441 for p in s.fncache.entries:
2441 for p in s.fncache.entries:
2442 s.encode(p)
2442 s.encode(p)
2443
2443
2444 timer(d)
2444 timer(d)
2445 fm.end()
2445 fm.end()
2446
2446
2447
2447
2448 def _bdiffworker(q, blocks, xdiff, ready, done):
2448 def _bdiffworker(q, blocks, xdiff, ready, done):
2449 while not done.is_set():
2449 while not done.is_set():
2450 pair = q.get()
2450 pair = q.get()
2451 while pair is not None:
2451 while pair is not None:
2452 if xdiff:
2452 if xdiff:
2453 mdiff.bdiff.xdiffblocks(*pair)
2453 mdiff.bdiff.xdiffblocks(*pair)
2454 elif blocks:
2454 elif blocks:
2455 mdiff.bdiff.blocks(*pair)
2455 mdiff.bdiff.blocks(*pair)
2456 else:
2456 else:
2457 mdiff.textdiff(*pair)
2457 mdiff.textdiff(*pair)
2458 q.task_done()
2458 q.task_done()
2459 pair = q.get()
2459 pair = q.get()
2460 q.task_done() # for the None one
2460 q.task_done() # for the None one
2461 with ready:
2461 with ready:
2462 ready.wait()
2462 ready.wait()
2463
2463
2464
2464
2465 def _manifestrevision(repo, mnode):
2465 def _manifestrevision(repo, mnode):
2466 ml = repo.manifestlog
2466 ml = repo.manifestlog
2467
2467
2468 if util.safehasattr(ml, b'getstorage'):
2468 if util.safehasattr(ml, b'getstorage'):
2469 store = ml.getstorage(b'')
2469 store = ml.getstorage(b'')
2470 else:
2470 else:
2471 store = ml._revlog
2471 store = ml._revlog
2472
2472
2473 return store.revision(mnode)
2473 return store.revision(mnode)
2474
2474
2475
2475
2476 @command(
2476 @command(
2477 b'perf::bdiff|perfbdiff',
2477 b'perf::bdiff|perfbdiff',
2478 revlogopts
2478 revlogopts
2479 + formatteropts
2479 + formatteropts
2480 + [
2480 + [
2481 (
2481 (
2482 b'',
2482 b'',
2483 b'count',
2483 b'count',
2484 1,
2484 1,
2485 b'number of revisions to test (when using --startrev)',
2485 b'number of revisions to test (when using --startrev)',
2486 ),
2486 ),
2487 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2487 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2488 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2488 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2489 (b'', b'blocks', False, b'test computing diffs into blocks'),
2489 (b'', b'blocks', False, b'test computing diffs into blocks'),
2490 (b'', b'xdiff', False, b'use xdiff algorithm'),
2490 (b'', b'xdiff', False, b'use xdiff algorithm'),
2491 ],
2491 ],
2492 b'-c|-m|FILE REV',
2492 b'-c|-m|FILE REV',
2493 )
2493 )
2494 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2494 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2495 """benchmark a bdiff between revisions
2495 """benchmark a bdiff between revisions
2496
2496
2497 By default, benchmark a bdiff between its delta parent and itself.
2497 By default, benchmark a bdiff between its delta parent and itself.
2498
2498
2499 With ``--count``, benchmark bdiffs between delta parents and self for N
2499 With ``--count``, benchmark bdiffs between delta parents and self for N
2500 revisions starting at the specified revision.
2500 revisions starting at the specified revision.
2501
2501
2502 With ``--alldata``, assume the requested revision is a changeset and
2502 With ``--alldata``, assume the requested revision is a changeset and
2503 measure bdiffs for all changes related to that changeset (manifest
2503 measure bdiffs for all changes related to that changeset (manifest
2504 and filelogs).
2504 and filelogs).
2505 """
2505 """
2506 opts = _byteskwargs(opts)
2506 opts = _byteskwargs(opts)
2507
2507
2508 if opts[b'xdiff'] and not opts[b'blocks']:
2508 if opts[b'xdiff'] and not opts[b'blocks']:
2509 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2509 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2510
2510
2511 if opts[b'alldata']:
2511 if opts[b'alldata']:
2512 opts[b'changelog'] = True
2512 opts[b'changelog'] = True
2513
2513
2514 if opts.get(b'changelog') or opts.get(b'manifest'):
2514 if opts.get(b'changelog') or opts.get(b'manifest'):
2515 file_, rev = None, file_
2515 file_, rev = None, file_
2516 elif rev is None:
2516 elif rev is None:
2517 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2517 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2518
2518
2519 blocks = opts[b'blocks']
2519 blocks = opts[b'blocks']
2520 xdiff = opts[b'xdiff']
2520 xdiff = opts[b'xdiff']
2521 textpairs = []
2521 textpairs = []
2522
2522
2523 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2523 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2524
2524
2525 startrev = r.rev(r.lookup(rev))
2525 startrev = r.rev(r.lookup(rev))
2526 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2526 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2527 if opts[b'alldata']:
2527 if opts[b'alldata']:
2528 # Load revisions associated with changeset.
2528 # Load revisions associated with changeset.
2529 ctx = repo[rev]
2529 ctx = repo[rev]
2530 mtext = _manifestrevision(repo, ctx.manifestnode())
2530 mtext = _manifestrevision(repo, ctx.manifestnode())
2531 for pctx in ctx.parents():
2531 for pctx in ctx.parents():
2532 pman = _manifestrevision(repo, pctx.manifestnode())
2532 pman = _manifestrevision(repo, pctx.manifestnode())
2533 textpairs.append((pman, mtext))
2533 textpairs.append((pman, mtext))
2534
2534
2535 # Load filelog revisions by iterating manifest delta.
2535 # Load filelog revisions by iterating manifest delta.
2536 man = ctx.manifest()
2536 man = ctx.manifest()
2537 pman = ctx.p1().manifest()
2537 pman = ctx.p1().manifest()
2538 for filename, change in pman.diff(man).items():
2538 for filename, change in pman.diff(man).items():
2539 fctx = repo.file(filename)
2539 fctx = repo.file(filename)
2540 f1 = fctx.revision(change[0][0] or -1)
2540 f1 = fctx.revision(change[0][0] or -1)
2541 f2 = fctx.revision(change[1][0] or -1)
2541 f2 = fctx.revision(change[1][0] or -1)
2542 textpairs.append((f1, f2))
2542 textpairs.append((f1, f2))
2543 else:
2543 else:
2544 dp = r.deltaparent(rev)
2544 dp = r.deltaparent(rev)
2545 textpairs.append((r.revision(dp), r.revision(rev)))
2545 textpairs.append((r.revision(dp), r.revision(rev)))
2546
2546
2547 withthreads = threads > 0
2547 withthreads = threads > 0
2548 if not withthreads:
2548 if not withthreads:
2549
2549
2550 def d():
2550 def d():
2551 for pair in textpairs:
2551 for pair in textpairs:
2552 if xdiff:
2552 if xdiff:
2553 mdiff.bdiff.xdiffblocks(*pair)
2553 mdiff.bdiff.xdiffblocks(*pair)
2554 elif blocks:
2554 elif blocks:
2555 mdiff.bdiff.blocks(*pair)
2555 mdiff.bdiff.blocks(*pair)
2556 else:
2556 else:
2557 mdiff.textdiff(*pair)
2557 mdiff.textdiff(*pair)
2558
2558
2559 else:
2559 else:
2560 q = queue()
2560 q = queue()
2561 for i in _xrange(threads):
2561 for i in _xrange(threads):
2562 q.put(None)
2562 q.put(None)
2563 ready = threading.Condition()
2563 ready = threading.Condition()
2564 done = threading.Event()
2564 done = threading.Event()
2565 for i in _xrange(threads):
2565 for i in _xrange(threads):
2566 threading.Thread(
2566 threading.Thread(
2567 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2567 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2568 ).start()
2568 ).start()
2569 q.join()
2569 q.join()
2570
2570
2571 def d():
2571 def d():
2572 for pair in textpairs:
2572 for pair in textpairs:
2573 q.put(pair)
2573 q.put(pair)
2574 for i in _xrange(threads):
2574 for i in _xrange(threads):
2575 q.put(None)
2575 q.put(None)
2576 with ready:
2576 with ready:
2577 ready.notify_all()
2577 ready.notify_all()
2578 q.join()
2578 q.join()
2579
2579
2580 timer, fm = gettimer(ui, opts)
2580 timer, fm = gettimer(ui, opts)
2581 timer(d)
2581 timer(d)
2582 fm.end()
2582 fm.end()
2583
2583
2584 if withthreads:
2584 if withthreads:
2585 done.set()
2585 done.set()
2586 for i in _xrange(threads):
2586 for i in _xrange(threads):
2587 q.put(None)
2587 q.put(None)
2588 with ready:
2588 with ready:
2589 ready.notify_all()
2589 ready.notify_all()
2590
2590
2591
2591
2592 @command(
2592 @command(
2593 b'perf::unbundle',
2594 formatteropts,
2595 b'BUNDLE_FILE',
2596 )
2597 def perf_unbundle(ui, repo, fname, **opts):
2598 """benchmark application of a bundle in a repository.
2599
2600 This does not include the final transaction processing"""
2601 from mercurial import exchange
2602 from mercurial import bundle2
2603
2604 with repo.lock():
2605 bundle = [None, None]
2606 try:
2607 with open(fname, mode="rb") as f:
2608
2609 def setup():
2610 gen, tr = bundle
2611 if tr is not None:
2612 tr.abort()
2613 bundle[:] = [None, None]
2614 f.seek(0)
2615 bundle[0] = exchange.readbundle(ui, f, fname)
2616 bundle[1] = repo.transaction(b'perf::unbundle')
2617
2618 def apply():
2619 gen, tr = bundle
2620 bundle2.applybundle(
2621 repo,
2622 gen,
2623 tr,
2624 source=b'perf::unbundle',
2625 url=fname,
2626 )
2627
2628 timer, fm = gettimer(ui, opts)
2629 timer(apply, setup=setup)
2630 fm.end()
2631 finally:
2632 gen, tr = bundle
2633 if tr is not None:
2634 tr.abort()
2635
2636
2637 @command(
2593 b'perf::unidiff|perfunidiff',
2638 b'perf::unidiff|perfunidiff',
2594 revlogopts
2639 revlogopts
2595 + formatteropts
2640 + formatteropts
2596 + [
2641 + [
2597 (
2642 (
2598 b'',
2643 b'',
2599 b'count',
2644 b'count',
2600 1,
2645 1,
2601 b'number of revisions to test (when using --startrev)',
2646 b'number of revisions to test (when using --startrev)',
2602 ),
2647 ),
2603 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2648 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2604 ],
2649 ],
2605 b'-c|-m|FILE REV',
2650 b'-c|-m|FILE REV',
2606 )
2651 )
2607 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2652 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2608 """benchmark a unified diff between revisions
2653 """benchmark a unified diff between revisions
2609
2654
2610 This doesn't include any copy tracing - it's just a unified diff
2655 This doesn't include any copy tracing - it's just a unified diff
2611 of the texts.
2656 of the texts.
2612
2657
2613 By default, benchmark a diff between its delta parent and itself.
2658 By default, benchmark a diff between its delta parent and itself.
2614
2659
2615 With ``--count``, benchmark diffs between delta parents and self for N
2660 With ``--count``, benchmark diffs between delta parents and self for N
2616 revisions starting at the specified revision.
2661 revisions starting at the specified revision.
2617
2662
2618 With ``--alldata``, assume the requested revision is a changeset and
2663 With ``--alldata``, assume the requested revision is a changeset and
2619 measure diffs for all changes related to that changeset (manifest
2664 measure diffs for all changes related to that changeset (manifest
2620 and filelogs).
2665 and filelogs).
2621 """
2666 """
2622 opts = _byteskwargs(opts)
2667 opts = _byteskwargs(opts)
2623 if opts[b'alldata']:
2668 if opts[b'alldata']:
2624 opts[b'changelog'] = True
2669 opts[b'changelog'] = True
2625
2670
2626 if opts.get(b'changelog') or opts.get(b'manifest'):
2671 if opts.get(b'changelog') or opts.get(b'manifest'):
2627 file_, rev = None, file_
2672 file_, rev = None, file_
2628 elif rev is None:
2673 elif rev is None:
2629 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2674 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2630
2675
2631 textpairs = []
2676 textpairs = []
2632
2677
2633 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2678 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2634
2679
2635 startrev = r.rev(r.lookup(rev))
2680 startrev = r.rev(r.lookup(rev))
2636 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2681 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2637 if opts[b'alldata']:
2682 if opts[b'alldata']:
2638 # Load revisions associated with changeset.
2683 # Load revisions associated with changeset.
2639 ctx = repo[rev]
2684 ctx = repo[rev]
2640 mtext = _manifestrevision(repo, ctx.manifestnode())
2685 mtext = _manifestrevision(repo, ctx.manifestnode())
2641 for pctx in ctx.parents():
2686 for pctx in ctx.parents():
2642 pman = _manifestrevision(repo, pctx.manifestnode())
2687 pman = _manifestrevision(repo, pctx.manifestnode())
2643 textpairs.append((pman, mtext))
2688 textpairs.append((pman, mtext))
2644
2689
2645 # Load filelog revisions by iterating manifest delta.
2690 # Load filelog revisions by iterating manifest delta.
2646 man = ctx.manifest()
2691 man = ctx.manifest()
2647 pman = ctx.p1().manifest()
2692 pman = ctx.p1().manifest()
2648 for filename, change in pman.diff(man).items():
2693 for filename, change in pman.diff(man).items():
2649 fctx = repo.file(filename)
2694 fctx = repo.file(filename)
2650 f1 = fctx.revision(change[0][0] or -1)
2695 f1 = fctx.revision(change[0][0] or -1)
2651 f2 = fctx.revision(change[1][0] or -1)
2696 f2 = fctx.revision(change[1][0] or -1)
2652 textpairs.append((f1, f2))
2697 textpairs.append((f1, f2))
2653 else:
2698 else:
2654 dp = r.deltaparent(rev)
2699 dp = r.deltaparent(rev)
2655 textpairs.append((r.revision(dp), r.revision(rev)))
2700 textpairs.append((r.revision(dp), r.revision(rev)))
2656
2701
2657 def d():
2702 def d():
2658 for left, right in textpairs:
2703 for left, right in textpairs:
2659 # The date strings don't matter, so we pass empty strings.
2704 # The date strings don't matter, so we pass empty strings.
2660 headerlines, hunks = mdiff.unidiff(
2705 headerlines, hunks = mdiff.unidiff(
2661 left, b'', right, b'', b'left', b'right', binary=False
2706 left, b'', right, b'', b'left', b'right', binary=False
2662 )
2707 )
2663 # consume iterators in roughly the way patch.py does
2708 # consume iterators in roughly the way patch.py does
2664 b'\n'.join(headerlines)
2709 b'\n'.join(headerlines)
2665 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2710 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2666
2711
2667 timer, fm = gettimer(ui, opts)
2712 timer, fm = gettimer(ui, opts)
2668 timer(d)
2713 timer(d)
2669 fm.end()
2714 fm.end()
2670
2715
2671
2716
2672 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2717 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2673 def perfdiffwd(ui, repo, **opts):
2718 def perfdiffwd(ui, repo, **opts):
2674 """Profile diff of working directory changes"""
2719 """Profile diff of working directory changes"""
2675 opts = _byteskwargs(opts)
2720 opts = _byteskwargs(opts)
2676 timer, fm = gettimer(ui, opts)
2721 timer, fm = gettimer(ui, opts)
2677 options = {
2722 options = {
2678 'w': 'ignore_all_space',
2723 'w': 'ignore_all_space',
2679 'b': 'ignore_space_change',
2724 'b': 'ignore_space_change',
2680 'B': 'ignore_blank_lines',
2725 'B': 'ignore_blank_lines',
2681 }
2726 }
2682
2727
2683 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2728 for diffopt in ('', 'w', 'b', 'B', 'wB'):
2684 opts = {options[c]: b'1' for c in diffopt}
2729 opts = {options[c]: b'1' for c in diffopt}
2685
2730
2686 def d():
2731 def d():
2687 ui.pushbuffer()
2732 ui.pushbuffer()
2688 commands.diff(ui, repo, **opts)
2733 commands.diff(ui, repo, **opts)
2689 ui.popbuffer()
2734 ui.popbuffer()
2690
2735
2691 diffopt = diffopt.encode('ascii')
2736 diffopt = diffopt.encode('ascii')
2692 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2737 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2693 timer(d, title=title)
2738 timer(d, title=title)
2694 fm.end()
2739 fm.end()
2695
2740
2696
2741
2697 @command(
2742 @command(
2698 b'perf::revlogindex|perfrevlogindex',
2743 b'perf::revlogindex|perfrevlogindex',
2699 revlogopts + formatteropts,
2744 revlogopts + formatteropts,
2700 b'-c|-m|FILE',
2745 b'-c|-m|FILE',
2701 )
2746 )
2702 def perfrevlogindex(ui, repo, file_=None, **opts):
2747 def perfrevlogindex(ui, repo, file_=None, **opts):
2703 """Benchmark operations against a revlog index.
2748 """Benchmark operations against a revlog index.
2704
2749
2705 This tests constructing a revlog instance, reading index data,
2750 This tests constructing a revlog instance, reading index data,
2706 parsing index data, and performing various operations related to
2751 parsing index data, and performing various operations related to
2707 index data.
2752 index data.
2708 """
2753 """
2709
2754
2710 opts = _byteskwargs(opts)
2755 opts = _byteskwargs(opts)
2711
2756
2712 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2757 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2713
2758
2714 opener = getattr(rl, 'opener') # trick linter
2759 opener = getattr(rl, 'opener') # trick linter
2715 # compat with hg <= 5.8
2760 # compat with hg <= 5.8
2716 radix = getattr(rl, 'radix', None)
2761 radix = getattr(rl, 'radix', None)
2717 indexfile = getattr(rl, '_indexfile', None)
2762 indexfile = getattr(rl, '_indexfile', None)
2718 if indexfile is None:
2763 if indexfile is None:
2719 # compatibility with <= hg-5.8
2764 # compatibility with <= hg-5.8
2720 indexfile = getattr(rl, 'indexfile')
2765 indexfile = getattr(rl, 'indexfile')
2721 data = opener.read(indexfile)
2766 data = opener.read(indexfile)
2722
2767
2723 header = struct.unpack(b'>I', data[0:4])[0]
2768 header = struct.unpack(b'>I', data[0:4])[0]
2724 version = header & 0xFFFF
2769 version = header & 0xFFFF
2725 if version == 1:
2770 if version == 1:
2726 inline = header & (1 << 16)
2771 inline = header & (1 << 16)
2727 else:
2772 else:
2728 raise error.Abort(b'unsupported revlog version: %d' % version)
2773 raise error.Abort(b'unsupported revlog version: %d' % version)
2729
2774
2730 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2775 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
2731 if parse_index_v1 is None:
2776 if parse_index_v1 is None:
2732 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2777 parse_index_v1 = mercurial.revlog.revlogio().parseindex
2733
2778
2734 rllen = len(rl)
2779 rllen = len(rl)
2735
2780
2736 node0 = rl.node(0)
2781 node0 = rl.node(0)
2737 node25 = rl.node(rllen // 4)
2782 node25 = rl.node(rllen // 4)
2738 node50 = rl.node(rllen // 2)
2783 node50 = rl.node(rllen // 2)
2739 node75 = rl.node(rllen // 4 * 3)
2784 node75 = rl.node(rllen // 4 * 3)
2740 node100 = rl.node(rllen - 1)
2785 node100 = rl.node(rllen - 1)
2741
2786
2742 allrevs = range(rllen)
2787 allrevs = range(rllen)
2743 allrevsrev = list(reversed(allrevs))
2788 allrevsrev = list(reversed(allrevs))
2744 allnodes = [rl.node(rev) for rev in range(rllen)]
2789 allnodes = [rl.node(rev) for rev in range(rllen)]
2745 allnodesrev = list(reversed(allnodes))
2790 allnodesrev = list(reversed(allnodes))
2746
2791
2747 def constructor():
2792 def constructor():
2748 if radix is not None:
2793 if radix is not None:
2749 revlog(opener, radix=radix)
2794 revlog(opener, radix=radix)
2750 else:
2795 else:
2751 # hg <= 5.8
2796 # hg <= 5.8
2752 revlog(opener, indexfile=indexfile)
2797 revlog(opener, indexfile=indexfile)
2753
2798
2754 def read():
2799 def read():
2755 with opener(indexfile) as fh:
2800 with opener(indexfile) as fh:
2756 fh.read()
2801 fh.read()
2757
2802
2758 def parseindex():
2803 def parseindex():
2759 parse_index_v1(data, inline)
2804 parse_index_v1(data, inline)
2760
2805
2761 def getentry(revornode):
2806 def getentry(revornode):
2762 index = parse_index_v1(data, inline)[0]
2807 index = parse_index_v1(data, inline)[0]
2763 index[revornode]
2808 index[revornode]
2764
2809
2765 def getentries(revs, count=1):
2810 def getentries(revs, count=1):
2766 index = parse_index_v1(data, inline)[0]
2811 index = parse_index_v1(data, inline)[0]
2767
2812
2768 for i in range(count):
2813 for i in range(count):
2769 for rev in revs:
2814 for rev in revs:
2770 index[rev]
2815 index[rev]
2771
2816
2772 def resolvenode(node):
2817 def resolvenode(node):
2773 index = parse_index_v1(data, inline)[0]
2818 index = parse_index_v1(data, inline)[0]
2774 rev = getattr(index, 'rev', None)
2819 rev = getattr(index, 'rev', None)
2775 if rev is None:
2820 if rev is None:
2776 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2821 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2777 # This only works for the C code.
2822 # This only works for the C code.
2778 if nodemap is None:
2823 if nodemap is None:
2779 return
2824 return
2780 rev = nodemap.__getitem__
2825 rev = nodemap.__getitem__
2781
2826
2782 try:
2827 try:
2783 rev(node)
2828 rev(node)
2784 except error.RevlogError:
2829 except error.RevlogError:
2785 pass
2830 pass
2786
2831
2787 def resolvenodes(nodes, count=1):
2832 def resolvenodes(nodes, count=1):
2788 index = parse_index_v1(data, inline)[0]
2833 index = parse_index_v1(data, inline)[0]
2789 rev = getattr(index, 'rev', None)
2834 rev = getattr(index, 'rev', None)
2790 if rev is None:
2835 if rev is None:
2791 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2836 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
2792 # This only works for the C code.
2837 # This only works for the C code.
2793 if nodemap is None:
2838 if nodemap is None:
2794 return
2839 return
2795 rev = nodemap.__getitem__
2840 rev = nodemap.__getitem__
2796
2841
2797 for i in range(count):
2842 for i in range(count):
2798 for node in nodes:
2843 for node in nodes:
2799 try:
2844 try:
2800 rev(node)
2845 rev(node)
2801 except error.RevlogError:
2846 except error.RevlogError:
2802 pass
2847 pass
2803
2848
2804 benches = [
2849 benches = [
2805 (constructor, b'revlog constructor'),
2850 (constructor, b'revlog constructor'),
2806 (read, b'read'),
2851 (read, b'read'),
2807 (parseindex, b'create index object'),
2852 (parseindex, b'create index object'),
2808 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2853 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2809 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2854 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2810 (lambda: resolvenode(node0), b'look up node at rev 0'),
2855 (lambda: resolvenode(node0), b'look up node at rev 0'),
2811 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2856 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2812 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2857 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2813 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2858 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2814 (lambda: resolvenode(node100), b'look up node at tip'),
2859 (lambda: resolvenode(node100), b'look up node at tip'),
2815 # 2x variation is to measure caching impact.
2860 # 2x variation is to measure caching impact.
2816 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2861 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2817 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2862 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2818 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2863 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2819 (
2864 (
2820 lambda: resolvenodes(allnodesrev, 2),
2865 lambda: resolvenodes(allnodesrev, 2),
2821 b'look up all nodes 2x (reverse)',
2866 b'look up all nodes 2x (reverse)',
2822 ),
2867 ),
2823 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2868 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2824 (
2869 (
2825 lambda: getentries(allrevs, 2),
2870 lambda: getentries(allrevs, 2),
2826 b'retrieve all index entries 2x (forward)',
2871 b'retrieve all index entries 2x (forward)',
2827 ),
2872 ),
2828 (
2873 (
2829 lambda: getentries(allrevsrev),
2874 lambda: getentries(allrevsrev),
2830 b'retrieve all index entries (reverse)',
2875 b'retrieve all index entries (reverse)',
2831 ),
2876 ),
2832 (
2877 (
2833 lambda: getentries(allrevsrev, 2),
2878 lambda: getentries(allrevsrev, 2),
2834 b'retrieve all index entries 2x (reverse)',
2879 b'retrieve all index entries 2x (reverse)',
2835 ),
2880 ),
2836 ]
2881 ]
2837
2882
2838 for fn, title in benches:
2883 for fn, title in benches:
2839 timer, fm = gettimer(ui, opts)
2884 timer, fm = gettimer(ui, opts)
2840 timer(fn, title=title)
2885 timer(fn, title=title)
2841 fm.end()
2886 fm.end()
2842
2887
2843
2888
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # drop cached data so every run measures cold reads
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            # walk from tip down to (and excluding) the start revision
            first, last = last - 1, first - 1
            step = -step

        for x in _xrange(first, last, step):
            # Old revlogs don't support passing an int; resolve the node.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2892
2937
2893
2938
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed typo: message previously read 'invalide run count'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-from-pass-1, timing-from-pass-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: this row previously used ``resultcount * 70 // 100``,
        # so the value labelled "50%" was actually the 70th percentile.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3035
3080
3036
3081
3037 class _faketr:
3082 class _faketr:
3038 def add(s, x, y, z=None):
3083 def add(s, x, y, z=None):
3039 return None
3084 return None
3040
3085
3041
3086
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions of ``orig`` into a scratch revlog, timing each add.

    Returns a list of ``(rev, timing)`` pairs, one per revision written.
    """
    results = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for pos, rev in enumerate(revs):
            updateprogress(pos)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # start each measurement from a cold cache
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as elapsed:
                dest.addrawrevision(*addargs, **addkwargs)
            results.append((rev, elapsed[0]))
        updateprogress(total)
        completeprogress()
    return results
3091
3136
3092
3137
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for ``addrawrevision``.

    Depending on ``source``, the revision data is supplied either as a full
    text or as a cached delta against a chosen base revision.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the shorter delta; ties go to p1
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff := diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        # reuse the delta base already chosen by the on-disk storage
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3133
3178
3134
3179
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of ``orig`` truncated before ``truncaterev``.

    The copy lives in a temporary directory that is removed on exit.
    Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern signature (radix-based)
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older signature taking explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3195
3240
3196
3241
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that supports revlog compression
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the revlog's backing file
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # BUG FIX: the first lookup previously used 'datafile' twice,
            # which breaks on modern revlogs where the attribute is
            # '_datafile' (matching the '_indexfile' handling above and the
            # pattern used by _temprevlog).
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3329
3374
3330
3375
3331 @command(
3376 @command(
3332 b'perf::revlogrevision|perfrevlogrevision',
3377 b'perf::revlogrevision|perfrevlogrevision',
3333 revlogopts
3378 revlogopts
3334 + formatteropts
3379 + formatteropts
3335 + [(b'', b'cache', False, b'use caches instead of clearing')],
3380 + [(b'', b'cache', False, b'use caches instead of clearing')],
3336 b'-c|-m|FILE REV',
3381 b'-c|-m|FILE REV',
3337 )
3382 )
3338 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3383 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3339 """Benchmark obtaining a revlog revision.
3384 """Benchmark obtaining a revlog revision.
3340
3385
3341 Obtaining a revlog revision consists of roughly the following steps:
3386 Obtaining a revlog revision consists of roughly the following steps:
3342
3387
3343 1. Compute the delta chain
3388 1. Compute the delta chain
3344 2. Slice the delta chain if applicable
3389 2. Slice the delta chain if applicable
3345 3. Obtain the raw chunks for that delta chain
3390 3. Obtain the raw chunks for that delta chain
3346 4. Decompress each raw chunk
3391 4. Decompress each raw chunk
3347 5. Apply binary patches to obtain fulltext
3392 5. Apply binary patches to obtain fulltext
3348 6. Verify hash of fulltext
3393 6. Verify hash of fulltext
3349
3394
3350 This command measures the time spent in each of these phases.
3395 This command measures the time spent in each of these phases.
3351 """
3396 """
3352 opts = _byteskwargs(opts)
3397 opts = _byteskwargs(opts)
3353
3398
3354 if opts.get(b'changelog') or opts.get(b'manifest'):
3399 if opts.get(b'changelog') or opts.get(b'manifest'):
3355 file_, rev = None, file_
3400 file_, rev = None, file_
3356 elif rev is None:
3401 elif rev is None:
3357 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3402 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3358
3403
3359 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3404 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3360
3405
3361 # _chunkraw was renamed to _getsegmentforrevs.
3406 # _chunkraw was renamed to _getsegmentforrevs.
3362 try:
3407 try:
3363 segmentforrevs = r._getsegmentforrevs
3408 segmentforrevs = r._getsegmentforrevs
3364 except AttributeError:
3409 except AttributeError:
3365 segmentforrevs = r._chunkraw
3410 segmentforrevs = r._chunkraw
3366
3411
3367 node = r.lookup(rev)
3412 node = r.lookup(rev)
3368 rev = r.rev(node)
3413 rev = r.rev(node)
3369
3414
3370 def getrawchunks(data, chain):
3415 def getrawchunks(data, chain):
3371 start = r.start
3416 start = r.start
3372 length = r.length
3417 length = r.length
3373 inline = r._inline
3418 inline = r._inline
3374 try:
3419 try:
3375 iosize = r.index.entry_size
3420 iosize = r.index.entry_size
3376 except AttributeError:
3421 except AttributeError:
3377 iosize = r._io.size
3422 iosize = r._io.size
3378 buffer = util.buffer
3423 buffer = util.buffer
3379
3424
3380 chunks = []
3425 chunks = []
3381 ladd = chunks.append
3426 ladd = chunks.append
3382 for idx, item in enumerate(chain):
3427 for idx, item in enumerate(chain):
3383 offset = start(item[0])
3428 offset = start(item[0])
3384 bits = data[idx]
3429 bits = data[idx]
3385 for rev in item:
3430 for rev in item:
3386 chunkstart = start(rev)
3431 chunkstart = start(rev)
3387 if inline:
3432 if inline:
3388 chunkstart += (rev + 1) * iosize
3433 chunkstart += (rev + 1) * iosize
3389 chunklength = length(rev)
3434 chunklength = length(rev)
3390 ladd(buffer(bits, chunkstart - offset, chunklength))
3435 ladd(buffer(bits, chunkstart - offset, chunklength))
3391
3436
3392 return chunks
3437 return chunks
3393
3438
3394 def dodeltachain(rev):
3439 def dodeltachain(rev):
3395 if not cache:
3440 if not cache:
3396 r.clearcaches()
3441 r.clearcaches()
3397 r._deltachain(rev)
3442 r._deltachain(rev)
3398
3443
3399 def doread(chain):
3444 def doread(chain):
3400 if not cache:
3445 if not cache:
3401 r.clearcaches()
3446 r.clearcaches()
3402 for item in slicedchain:
3447 for item in slicedchain:
3403 segmentforrevs(item[0], item[-1])
3448 segmentforrevs(item[0], item[-1])
3404
3449
3405 def doslice(r, chain, size):
3450 def doslice(r, chain, size):
3406 for s in slicechunk(r, chain, targetsize=size):
3451 for s in slicechunk(r, chain, targetsize=size):
3407 pass
3452 pass
3408
3453
3409 def dorawchunks(data, chain):
3454 def dorawchunks(data, chain):
3410 if not cache:
3455 if not cache:
3411 r.clearcaches()
3456 r.clearcaches()
3412 getrawchunks(data, chain)
3457 getrawchunks(data, chain)
3413
3458
3414 def dodecompress(chunks):
3459 def dodecompress(chunks):
3415 decomp = r.decompress
3460 decomp = r.decompress
3416 for chunk in chunks:
3461 for chunk in chunks:
3417 decomp(chunk)
3462 decomp(chunk)
3418
3463
3419 def dopatch(text, bins):
3464 def dopatch(text, bins):
3420 if not cache:
3465 if not cache:
3421 r.clearcaches()
3466 r.clearcaches()
3422 mdiff.patches(text, bins)
3467 mdiff.patches(text, bins)
3423
3468
3424 def dohash(text):
3469 def dohash(text):
3425 if not cache:
3470 if not cache:
3426 r.clearcaches()
3471 r.clearcaches()
3427 r.checkhash(text, node, rev=rev)
3472 r.checkhash(text, node, rev=rev)
3428
3473
3429 def dorevision():
3474 def dorevision():
3430 if not cache:
3475 if not cache:
3431 r.clearcaches()
3476 r.clearcaches()
3432 r.revision(node)
3477 r.revision(node)
3433
3478
3434 try:
3479 try:
3435 from mercurial.revlogutils.deltas import slicechunk
3480 from mercurial.revlogutils.deltas import slicechunk
3436 except ImportError:
3481 except ImportError:
3437 slicechunk = getattr(revlog, '_slicechunk', None)
3482 slicechunk = getattr(revlog, '_slicechunk', None)
3438
3483
3439 size = r.length(rev)
3484 size = r.length(rev)
3440 chain = r._deltachain(rev)[0]
3485 chain = r._deltachain(rev)[0]
3441 if not getattr(r, '_withsparseread', False):
3486 if not getattr(r, '_withsparseread', False):
3442 slicedchain = (chain,)
3487 slicedchain = (chain,)
3443 else:
3488 else:
3444 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3489 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3445 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3490 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3446 rawchunks = getrawchunks(data, slicedchain)
3491 rawchunks = getrawchunks(data, slicedchain)
3447 bins = r._chunks(chain)
3492 bins = r._chunks(chain)
3448 text = bytes(bins[0])
3493 text = bytes(bins[0])
3449 bins = bins[1:]
3494 bins = bins[1:]
3450 text = mdiff.patches(text, bins)
3495 text = mdiff.patches(text, bins)
3451
3496
3452 benches = [
3497 benches = [
3453 (lambda: dorevision(), b'full'),
3498 (lambda: dorevision(), b'full'),
3454 (lambda: dodeltachain(rev), b'deltachain'),
3499 (lambda: dodeltachain(rev), b'deltachain'),
3455 (lambda: doread(chain), b'read'),
3500 (lambda: doread(chain), b'read'),
3456 ]
3501 ]
3457
3502
3458 if getattr(r, '_withsparseread', False):
3503 if getattr(r, '_withsparseread', False):
3459 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3504 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3460 benches.append(slicing)
3505 benches.append(slicing)
3461
3506
3462 benches.extend(
3507 benches.extend(
3463 [
3508 [
3464 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3509 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3465 (lambda: dodecompress(rawchunks), b'decompress'),
3510 (lambda: dodecompress(rawchunks), b'decompress'),
3466 (lambda: dopatch(text, bins), b'patch'),
3511 (lambda: dopatch(text, bins), b'patch'),
3467 (lambda: dohash(text), b'hash'),
3512 (lambda: dohash(text), b'hash'),
3468 ]
3513 ]
3469 )
3514 )
3470
3515
3471 timer, fm = gettimer(ui, opts)
3516 timer, fm = gettimer(ui, opts)
3472 for fn, title in benches:
3517 for fn, title in benches:
3473 timer(fn, title=title)
3518 timer(fn, title=title)
3474 fm.end()
3519 fm.end()
3475
3520
3476
3521
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold the filtered and obsolescence related caches."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        # timed payload: optionally drop the volatile caches first, then
        # fully consume the revset, either as changectx objects (--contexts)
        # or as plain revision numbers
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3508
3553
3509
3554
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets hold elements related to filtering and obsolescence.

    With positional ``names`` arguments, only the named obsolescence sets
    and repoview filters are benchmarked; otherwise all known ones are.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # benchmark on the unfiltered repo so filtering itself is not timed here
    repo = repo.unfiltered()

    def getobs(name):
        # build a benchmark callable for one obsolescence set (obsolete.cachefuncs key)
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                # also drop the obsstore filecache entry so the markers are
                # re-read from disk on each run
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a benchmark callable for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
3557
3602
3558
3603
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so subset build time is included
                view._branchcaches.clear()
            else:
                # only drop the entry for this filter; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so that
        # allfilters ends up ordered from smaller subset to bigger subset
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reading and writing for the duration of the
    # benchmark so only the in-memory computation is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        # always restore the patched read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3648
3693
3649
3694
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closures

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the timed update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register two synthetic repoview filters matching exactly the
        # requested base and target revision sets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found, build the base branchmap from
            # scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the synthetic filters
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3758
3803
3759
3804
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fixed typo in help text: 'brachmap' -> 'branchmap'
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only print the size of each on-disk branchmap cache file
    (one per repoview filter) and return without benchmarking.
    """
    # NOTE: `filter` and `list` shadow builtins, but they are part of the
    # command interface and cannot be renamed compatibly.
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # 'branch2*' files in the cache vfs are the on-disk branchmap caches
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; fall back to
    # progressively larger subsets until a cached branchmap is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3818
3863
3819
3864
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # instantiating the obsstore parses all markers from the store vfs;
        # its length is the marker count reported as the benchmark result
        return len(obsolete.obsstore(repo, svfs))

    timer(loadmarkers)
    fm.end()
3829
3874
3830
3875
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark util.lrucachedict operations

    Benchmarks construction, gets, inserts/sets and a randomized mix of
    gets and sets. When --costlimit is non-zero, the cost-aware variants
    (``insert(..., cost=...)`` with ``maxcost``) are benchmarked instead.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # construction cost only; loop amortizes timer overhead
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # entries may have been evicted to satisfy the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    # op 0 is a get, op 1 is a set; keys range over twice the cache size so
    # roughly half the gets miss.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3985
4030
3986
4031
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)

    Writes ``nlines`` lines of ``nitems`` copies of ``item`` through the
    selected ui write method, either item by item or one whole line at a
    time (--batch-line), optionally flushing after each line (--flush-line).
    """
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # precompute the full line so building it is not part of the timing
        line = item * nitems + b'\n'

    def benchmark():
        # use distinct loop variables: the original code reused `i` for
        # both loops, shadowing the outer index inside the inner loop
        for lineno in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for itemno in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4028
4073
4029
4074
def uisetup(ui):
    """Extension setup hook: patch ancient Mercurial where needed."""
    has_openrevlog = util.safehasattr(cmdutil, b'openrevlog')
    has_debugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not has_openrevlog or has_debugrevlogopts:
        return

    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4048
4093
4049
4094
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def drive():
        # Walk a single progress bar from 0 up to `total`, one
        # increment per step.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(drive)
    fm.end()
@@ -1,426 +1,437 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistics will be reported for each benchmark: best,
51 When set, additional statistics will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of runs (default: 1)
56 number of second to wait before any group of runs (default: 1)
57
57
58 "pre-run"
58 "pre-run"
59 number of run to perform before starting measurement.
59 number of run to perform before starting measurement.
60
60
61 "profile-benchmark"
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
63 benchmarked)
64
64
65 "run-limits"
65 "run-limits"
66 Control the number of runs each benchmark will perform. The option value
66 Control the number of runs each benchmark will perform. The option value
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 conditions are considered in order with the following logic:
68 conditions are considered in order with the following logic:
69
69
70 If benchmark has been running for <time> seconds, and we have performed
70 If benchmark has been running for <time> seconds, and we have performed
71 <numberofrun> iterations, stop the benchmark,
71 <numberofrun> iterations, stop the benchmark,
72
72
73 The default value is: '3.0-100, 10.0-3'
73 The default value is: '3.0-100, 10.0-3'
74
74
75 "stub"
75 "stub"
76 When set, benchmarks will only be run once, useful for testing (default:
76 When set, benchmarks will only be run once, useful for testing (default:
77 off)
77 off)
78
78
79 list of commands:
79 list of commands:
80
80
81 perf::addremove
81 perf::addremove
82 (no help text available)
82 (no help text available)
83 perf::ancestors
83 perf::ancestors
84 (no help text available)
84 (no help text available)
85 perf::ancestorset
85 perf::ancestorset
86 (no help text available)
86 (no help text available)
87 perf::annotate
87 perf::annotate
88 (no help text available)
88 (no help text available)
89 perf::bdiff benchmark a bdiff between revisions
89 perf::bdiff benchmark a bdiff between revisions
90 perf::bookmarks
90 perf::bookmarks
91 benchmark parsing bookmarks from disk to memory
91 benchmark parsing bookmarks from disk to memory
92 perf::branchmap
92 perf::branchmap
93 benchmark the update of a branchmap
93 benchmark the update of a branchmap
94 perf::branchmapload
94 perf::branchmapload
95 benchmark reading the branchmap
95 benchmark reading the branchmap
96 perf::branchmapupdate
96 perf::branchmapupdate
97 benchmark branchmap update from for <base> revs to <target>
97 benchmark branchmap update from for <base> revs to <target>
98 revs
98 revs
99 perf::bundle benchmark the creation of a bundle from a repository
99 perf::bundle benchmark the creation of a bundle from a repository
100 perf::bundleread
100 perf::bundleread
101 Benchmark reading of bundle files.
101 Benchmark reading of bundle files.
102 perf::cca (no help text available)
102 perf::cca (no help text available)
103 perf::changegroupchangelog
103 perf::changegroupchangelog
104 Benchmark producing a changelog group for a changegroup.
104 Benchmark producing a changelog group for a changegroup.
105 perf::changeset
105 perf::changeset
106 (no help text available)
106 (no help text available)
107 perf::ctxfiles
107 perf::ctxfiles
108 (no help text available)
108 (no help text available)
109 perf::diffwd Profile diff of working directory changes
109 perf::diffwd Profile diff of working directory changes
110 perf::dirfoldmap
110 perf::dirfoldmap
111 benchmap a 'dirstate._map.dirfoldmap.get()' request
111 benchmap a 'dirstate._map.dirfoldmap.get()' request
112 perf::dirs (no help text available)
112 perf::dirs (no help text available)
113 perf::dirstate
113 perf::dirstate
114 benchmap the time of various distate operations
114 benchmap the time of various distate operations
115 perf::dirstatedirs
115 perf::dirstatedirs
116 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
116 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
117 perf::dirstatefoldmap
117 perf::dirstatefoldmap
118 benchmap a 'dirstate._map.filefoldmap.get()' request
118 benchmap a 'dirstate._map.filefoldmap.get()' request
119 perf::dirstatewrite
119 perf::dirstatewrite
120 benchmap the time it take to write a dirstate on disk
120 benchmap the time it take to write a dirstate on disk
121 perf::discovery
121 perf::discovery
122 benchmark discovery between local repo and the peer at given
122 benchmark discovery between local repo and the peer at given
123 path
123 path
124 perf::fncacheencode
124 perf::fncacheencode
125 (no help text available)
125 (no help text available)
126 perf::fncacheload
126 perf::fncacheload
127 (no help text available)
127 (no help text available)
128 perf::fncachewrite
128 perf::fncachewrite
129 (no help text available)
129 (no help text available)
130 perf::heads benchmark the computation of a changelog heads
130 perf::heads benchmark the computation of a changelog heads
131 perf::helper-mergecopies
131 perf::helper-mergecopies
132 find statistics about potential parameters for
132 find statistics about potential parameters for
133 'perfmergecopies'
133 'perfmergecopies'
134 perf::helper-pathcopies
134 perf::helper-pathcopies
135 find statistic about potential parameters for the
135 find statistic about potential parameters for the
136 'perftracecopies'
136 'perftracecopies'
137 perf::ignore benchmark operation related to computing ignore
137 perf::ignore benchmark operation related to computing ignore
138 perf::index benchmark index creation time followed by a lookup
138 perf::index benchmark index creation time followed by a lookup
139 perf::linelogedits
139 perf::linelogedits
140 (no help text available)
140 (no help text available)
141 perf::loadmarkers
141 perf::loadmarkers
142 benchmark the time to parse the on-disk markers for a repo
142 benchmark the time to parse the on-disk markers for a repo
143 perf::log (no help text available)
143 perf::log (no help text available)
144 perf::lookup (no help text available)
144 perf::lookup (no help text available)
145 perf::lrucachedict
145 perf::lrucachedict
146 (no help text available)
146 (no help text available)
147 perf::manifest
147 perf::manifest
148 benchmark the time to read a manifest from disk and return a
148 benchmark the time to read a manifest from disk and return a
149 usable
149 usable
150 perf::mergecalculate
150 perf::mergecalculate
151 (no help text available)
151 (no help text available)
152 perf::mergecopies
152 perf::mergecopies
153 measure runtime of 'copies.mergecopies'
153 measure runtime of 'copies.mergecopies'
154 perf::moonwalk
154 perf::moonwalk
155 benchmark walking the changelog backwards
155 benchmark walking the changelog backwards
156 perf::nodelookup
156 perf::nodelookup
157 (no help text available)
157 (no help text available)
158 perf::nodemap
158 perf::nodemap
159 benchmark the time necessary to look up revision from a cold
159 benchmark the time necessary to look up revision from a cold
160 nodemap
160 nodemap
161 perf::parents
161 perf::parents
162 benchmark the time necessary to fetch one changeset's parents.
162 benchmark the time necessary to fetch one changeset's parents.
163 perf::pathcopies
163 perf::pathcopies
164 benchmark the copy tracing logic
164 benchmark the copy tracing logic
165 perf::phases benchmark phasesets computation
165 perf::phases benchmark phasesets computation
166 perf::phasesremote
166 perf::phasesremote
167 benchmark time needed to analyse phases of the remote server
167 benchmark time needed to analyse phases of the remote server
168 perf::progress
168 perf::progress
169 printing of progress bars
169 printing of progress bars
170 perf::rawfiles
170 perf::rawfiles
171 (no help text available)
171 (no help text available)
172 perf::revlogchunks
172 perf::revlogchunks
173 Benchmark operations on revlog chunks.
173 Benchmark operations on revlog chunks.
174 perf::revlogindex
174 perf::revlogindex
175 Benchmark operations against a revlog index.
175 Benchmark operations against a revlog index.
176 perf::revlogrevision
176 perf::revlogrevision
177 Benchmark obtaining a revlog revision.
177 Benchmark obtaining a revlog revision.
178 perf::revlogrevisions
178 perf::revlogrevisions
179 Benchmark reading a series of revisions from a revlog.
179 Benchmark reading a series of revisions from a revlog.
180 perf::revlogwrite
180 perf::revlogwrite
181 Benchmark writing a series of revisions to a revlog.
181 Benchmark writing a series of revisions to a revlog.
182 perf::revrange
182 perf::revrange
183 (no help text available)
183 (no help text available)
184 perf::revset benchmark the execution time of a revset
184 perf::revset benchmark the execution time of a revset
185 perf::startup
185 perf::startup
186 (no help text available)
186 (no help text available)
187 perf::status benchmark the performance of a single status call
187 perf::status benchmark the performance of a single status call
188 perf::tags (no help text available)
188 perf::tags (no help text available)
189 perf::templating
189 perf::templating
190 test the rendering time of a given template
190 test the rendering time of a given template
191 perf::unbundle
192 benchmark application of a bundle in a repository.
191 perf::unidiff
193 perf::unidiff
192 benchmark a unified diff between revisions
194 benchmark a unified diff between revisions
193 perf::volatilesets
195 perf::volatilesets
194 benchmark the computation of various volatile set
196 benchmark the computation of various volatile set
195 perf::walk (no help text available)
197 perf::walk (no help text available)
196 perf::write microbenchmark ui.write (and others)
198 perf::write microbenchmark ui.write (and others)
197
199
198 (use 'hg help -v perf' to show built-in aliases and global options)
200 (use 'hg help -v perf' to show built-in aliases and global options)
199
201
200 $ hg help perfaddremove
202 $ hg help perfaddremove
201 hg perf::addremove
203 hg perf::addremove
202
204
203 aliases: perfaddremove
205 aliases: perfaddremove
204
206
205 (no help text available)
207 (no help text available)
206
208
207 options:
209 options:
208
210
209 -T --template TEMPLATE display with template
211 -T --template TEMPLATE display with template
210
212
211 (some details hidden, use --verbose to show complete help)
213 (some details hidden, use --verbose to show complete help)
212
214
213 $ hg perfaddremove
215 $ hg perfaddremove
214 $ hg perfancestors
216 $ hg perfancestors
215 $ hg perfancestorset 2
217 $ hg perfancestorset 2
216 $ hg perfannotate a
218 $ hg perfannotate a
217 $ hg perfbdiff -c 1
219 $ hg perfbdiff -c 1
218 $ hg perfbdiff --alldata 1
220 $ hg perfbdiff --alldata 1
219 $ hg perfunidiff -c 1
221 $ hg perfunidiff -c 1
220 $ hg perfunidiff --alldata 1
222 $ hg perfunidiff --alldata 1
221 $ hg perfbookmarks
223 $ hg perfbookmarks
222 $ hg perfbranchmap
224 $ hg perfbranchmap
223 $ hg perfbranchmapload
225 $ hg perfbranchmapload
224 $ hg perfbranchmapupdate --base "not tip" --target "tip"
226 $ hg perfbranchmapupdate --base "not tip" --target "tip"
225 benchmark of branchmap with 3 revisions with 1 new ones
227 benchmark of branchmap with 3 revisions with 1 new ones
226 $ hg perfcca
228 $ hg perfcca
227 $ hg perfchangegroupchangelog
229 $ hg perfchangegroupchangelog
228 $ hg perfchangegroupchangelog --cgversion 01
230 $ hg perfchangegroupchangelog --cgversion 01
229 $ hg perfchangeset 2
231 $ hg perfchangeset 2
230 $ hg perfctxfiles 2
232 $ hg perfctxfiles 2
231 $ hg perfdiffwd
233 $ hg perfdiffwd
232 $ hg perfdirfoldmap
234 $ hg perfdirfoldmap
233 $ hg perfdirs
235 $ hg perfdirs
234 $ hg perfdirstate
236 $ hg perfdirstate
235 $ hg perfdirstate --contains
237 $ hg perfdirstate --contains
236 $ hg perfdirstate --iteration
238 $ hg perfdirstate --iteration
237 $ hg perfdirstatedirs
239 $ hg perfdirstatedirs
238 $ hg perfdirstatefoldmap
240 $ hg perfdirstatefoldmap
239 $ hg perfdirstatewrite
241 $ hg perfdirstatewrite
240 #if repofncache
242 #if repofncache
241 $ hg perffncacheencode
243 $ hg perffncacheencode
242 $ hg perffncacheload
244 $ hg perffncacheload
243 $ hg debugrebuildfncache
245 $ hg debugrebuildfncache
244 fncache already up to date
246 fncache already up to date
245 $ hg perffncachewrite
247 $ hg perffncachewrite
246 $ hg debugrebuildfncache
248 $ hg debugrebuildfncache
247 fncache already up to date
249 fncache already up to date
248 #endif
250 #endif
249 $ hg perfheads
251 $ hg perfheads
250 $ hg perfignore
252 $ hg perfignore
251 $ hg perfindex
253 $ hg perfindex
252 $ hg perflinelogedits -n 1
254 $ hg perflinelogedits -n 1
253 $ hg perfloadmarkers
255 $ hg perfloadmarkers
254 $ hg perflog
256 $ hg perflog
255 $ hg perflookup 2
257 $ hg perflookup 2
256 $ hg perflrucache
258 $ hg perflrucache
257 $ hg perfmanifest 2
259 $ hg perfmanifest 2
258 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
260 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
259 $ hg perfmanifest -m 44fe2c8352bb
261 $ hg perfmanifest -m 44fe2c8352bb
260 abort: manifest revision must be integer or full node
262 abort: manifest revision must be integer or full node
261 [255]
263 [255]
262 $ hg perfmergecalculate -r 3
264 $ hg perfmergecalculate -r 3
263 $ hg perfmoonwalk
265 $ hg perfmoonwalk
264 $ hg perfnodelookup 2
266 $ hg perfnodelookup 2
265 $ hg perfpathcopies 1 2
267 $ hg perfpathcopies 1 2
266 $ hg perfprogress --total 1000
268 $ hg perfprogress --total 1000
267 $ hg perfrawfiles 2
269 $ hg perfrawfiles 2
268 $ hg perfrevlogindex -c
270 $ hg perfrevlogindex -c
269 #if reporevlogstore
271 #if reporevlogstore
270 $ hg perfrevlogrevisions .hg/store/data/a.i
272 $ hg perfrevlogrevisions .hg/store/data/a.i
271 #endif
273 #endif
272 $ hg perfrevlogrevision -m 0
274 $ hg perfrevlogrevision -m 0
273 $ hg perfrevlogchunks -c
275 $ hg perfrevlogchunks -c
274 $ hg perfrevrange
276 $ hg perfrevrange
275 $ hg perfrevset 'all()'
277 $ hg perfrevset 'all()'
276 $ hg perfstartup
278 $ hg perfstartup
277 $ hg perfstatus
279 $ hg perfstatus
278 $ hg perfstatus --dirstate
280 $ hg perfstatus --dirstate
279 $ hg perftags
281 $ hg perftags
280 $ hg perftemplating
282 $ hg perftemplating
281 $ hg perfvolatilesets
283 $ hg perfvolatilesets
282 $ hg perfwalk
284 $ hg perfwalk
283 $ hg perfparents
285 $ hg perfparents
284 $ hg perfdiscovery -q .
286 $ hg perfdiscovery -q .
285
287
286 Test run control
288 Test run control
287 ----------------
289 ----------------
288
290
289 Simple single entry
291 Simple single entry
290
292
291 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
293 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
292 ! wall * comb * user * sys * (best of 15) (glob)
294 ! wall * comb * user * sys * (best of 15) (glob)
293
295
294 Multiple entries
296 Multiple entries
295
297
296 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
298 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
297 ! wall * comb * user * sys * (best of 5) (glob)
299 ! wall * comb * user * sys * (best of 5) (glob)
298
300
299 error case are ignored
301 error case are ignored
300
302
301 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
303 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
302 malformatted run limit entry, missing "-": 500
304 malformatted run limit entry, missing "-": 500
303 ! wall * comb * user * sys * (best of 5) (glob)
305 ! wall * comb * user * sys * (best of 5) (glob)
304 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
306 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
305 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
307 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
306 ! wall * comb * user * sys * (best of 5) (glob)
308 ! wall * comb * user * sys * (best of 5) (glob)
307 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
309 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
308 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
310 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
309 ! wall * comb * user * sys * (best of 5) (glob)
311 ! wall * comb * user * sys * (best of 5) (glob)
310
312
311 test actual output
313 test actual output
312 ------------------
314 ------------------
313
315
314 normal output:
316 normal output:
315
317
316 $ hg perfheads --config perf.stub=no
318 $ hg perfheads --config perf.stub=no
317 ! wall * comb * user * sys * (best of *) (glob)
319 ! wall * comb * user * sys * (best of *) (glob)
318
320
319 detailed output:
321 detailed output:
320
322
321 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
323 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
322 ! wall * comb * user * sys * (best of *) (glob)
324 ! wall * comb * user * sys * (best of *) (glob)
323 ! wall * comb * user * sys * (max of *) (glob)
325 ! wall * comb * user * sys * (max of *) (glob)
324 ! wall * comb * user * sys * (avg of *) (glob)
326 ! wall * comb * user * sys * (avg of *) (glob)
325 ! wall * comb * user * sys * (median of *) (glob)
327 ! wall * comb * user * sys * (median of *) (glob)
326
328
327 test json output
329 test json output
328 ----------------
330 ----------------
329
331
330 normal output:
332 normal output:
331
333
332 $ hg perfheads --template json --config perf.stub=no
334 $ hg perfheads --template json --config perf.stub=no
333 [
335 [
334 {
336 {
335 "comb": *, (glob)
337 "comb": *, (glob)
336 "count": *, (glob)
338 "count": *, (glob)
337 "sys": *, (glob)
339 "sys": *, (glob)
338 "user": *, (glob)
340 "user": *, (glob)
339 "wall": * (glob)
341 "wall": * (glob)
340 }
342 }
341 ]
343 ]
342
344
343 detailed output:
345 detailed output:
344
346
345 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
347 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
346 [
348 [
347 {
349 {
348 "avg.comb": *, (glob)
350 "avg.comb": *, (glob)
349 "avg.count": *, (glob)
351 "avg.count": *, (glob)
350 "avg.sys": *, (glob)
352 "avg.sys": *, (glob)
351 "avg.user": *, (glob)
353 "avg.user": *, (glob)
352 "avg.wall": *, (glob)
354 "avg.wall": *, (glob)
353 "comb": *, (glob)
355 "comb": *, (glob)
354 "count": *, (glob)
356 "count": *, (glob)
355 "max.comb": *, (glob)
357 "max.comb": *, (glob)
356 "max.count": *, (glob)
358 "max.count": *, (glob)
357 "max.sys": *, (glob)
359 "max.sys": *, (glob)
358 "max.user": *, (glob)
360 "max.user": *, (glob)
359 "max.wall": *, (glob)
361 "max.wall": *, (glob)
360 "median.comb": *, (glob)
362 "median.comb": *, (glob)
361 "median.count": *, (glob)
363 "median.count": *, (glob)
362 "median.sys": *, (glob)
364 "median.sys": *, (glob)
363 "median.user": *, (glob)
365 "median.user": *, (glob)
364 "median.wall": *, (glob)
366 "median.wall": *, (glob)
365 "sys": *, (glob)
367 "sys": *, (glob)
366 "user": *, (glob)
368 "user": *, (glob)
367 "wall": * (glob)
369 "wall": * (glob)
368 }
370 }
369 ]
371 ]
370
372
371 Test pre-run feature
373 Test pre-run feature
372 --------------------
374 --------------------
373
375
374 (perf discovery has some spurious output)
376 (perf discovery has some spurious output)
375
377
376 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
378 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
377 ! wall * comb * user * sys * (best of 1) (glob)
379 ! wall * comb * user * sys * (best of 1) (glob)
378 searching for changes
380 searching for changes
379 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
381 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
380 ! wall * comb * user * sys * (best of 1) (glob)
382 ! wall * comb * user * sys * (best of 1) (glob)
381 searching for changes
383 searching for changes
382 searching for changes
384 searching for changes
383 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
385 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
384 ! wall * comb * user * sys * (best of 1) (glob)
386 ! wall * comb * user * sys * (best of 1) (glob)
385 searching for changes
387 searching for changes
386 searching for changes
388 searching for changes
387 searching for changes
389 searching for changes
388 searching for changes
390 searching for changes
389 $ hg perf::bundle 'last(all(), 5)'
391 $ hg perf::bundle 'last(all(), 5)'
392 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
393 4 changesets found
394 $ hg perf::unbundle last-5.hg
395 adding changesets
396 adding manifests
397 adding file changes
398 transaction abort!
399 rollback completed
400
390
401
391 test profile-benchmark option
402 test profile-benchmark option
392 ------------------------------
403 ------------------------------
393
404
394 Function to check that statprof ran
405 Function to check that statprof ran
395 $ statprofran () {
406 $ statprofran () {
396 > egrep 'Sample count:|No samples recorded' > /dev/null
407 > egrep 'Sample count:|No samples recorded' > /dev/null
397 > }
408 > }
398 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
409 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
399
410
400 Check perf.py for historical portability
411 Check perf.py for historical portability
401 ----------------------------------------
412 ----------------------------------------
402
413
403 $ cd "$TESTDIR/.."
414 $ cd "$TESTDIR/.."
404
415
405 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
416 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
406 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
417 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
407 > "$TESTDIR"/check-perf-code.py contrib/perf.py
418 > "$TESTDIR"/check-perf-code.py contrib/perf.py
408 contrib/perf.py:\d+: (re)
419 contrib/perf.py:\d+: (re)
409 > from mercurial import (
420 > from mercurial import (
410 import newer module separately in try clause for early Mercurial
421 import newer module separately in try clause for early Mercurial
411 contrib/perf.py:\d+: (re)
422 contrib/perf.py:\d+: (re)
412 > from mercurial import (
423 > from mercurial import (
413 import newer module separately in try clause for early Mercurial
424 import newer module separately in try clause for early Mercurial
414 contrib/perf.py:\d+: (re)
425 contrib/perf.py:\d+: (re)
415 > origindexpath = orig.opener.join(indexfile)
426 > origindexpath = orig.opener.join(indexfile)
416 use getvfs()/getsvfs() for early Mercurial
427 use getvfs()/getsvfs() for early Mercurial
417 contrib/perf.py:\d+: (re)
428 contrib/perf.py:\d+: (re)
418 > origdatapath = orig.opener.join(datafile)
429 > origdatapath = orig.opener.join(datafile)
419 use getvfs()/getsvfs() for early Mercurial
430 use getvfs()/getsvfs() for early Mercurial
420 contrib/perf.py:\d+: (re)
431 contrib/perf.py:\d+: (re)
421 > vfs = vfsmod.vfs(tmpdir)
432 > vfs = vfsmod.vfs(tmpdir)
422 use getvfs()/getsvfs() for early Mercurial
433 use getvfs()/getsvfs() for early Mercurial
423 contrib/perf.py:\d+: (re)
434 contrib/perf.py:\d+: (re)
424 > vfs.options = getattr(orig.opener, 'options', None)
435 > vfs.options = getattr(orig.opener, 'options', None)
425 use getvfs()/getsvfs() for early Mercurial
436 use getvfs()/getsvfs() for early Mercurial
426 [1]
437 [1]
General Comments 0
You need to be logged in to leave comments. Login now