perf: introduce more cache invalidation option in perf::tags...
marmoute
r51831:f02b62b7 stable
@@ -1,4448 +1,4497 @@
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If the benchmark has been running for <time> seconds, and we have
      performed <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''
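# Illustrative example (not part of the original file): the options above
# are read from the `[perf]` section of an hgrc, so a configuration such as
#
#   [perf]
#   all-timing = yes
#   presleep = 0
#   run-limits = 5.0-50, 15.0-5
#
# reports best/max/avg/median for each benchmark, skips the idle period
# before runs, and stops a benchmark once it has run for 5 seconds and 50
# iterations (or for 15 seconds and 5 iterations).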

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time

import mercurial.revlog
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    profiling = None

try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, *args, **kwargs)


def identity(a):
    return a


try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()


def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")


if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator


try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )


def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len


class noop:
    """dummy context manager"""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


NOOPCTX = noop()


def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of the formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm


def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()


@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))


# list of stop conditions (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)


@contextlib.contextmanager
def noop_context():
    yield


def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)


def formatone(fm, timings, title=None, result=None, displayall=False):
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)


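# For reference, formatone() above renders plain output like the following
# (values invented for illustration); with perf.all-timing enabled the
# max/avg/median lines are emitted in addition to the best line:
#
#   ! wall 0.001234 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)
#   ! wall 0.001891 comb 0.020000 user 0.010000 sys 0.010000 (max of 100)
#   ! wall 0.001402 comb 0.012300 user 0.011100 sys 0.001200 (avg of 100)
#   ! wall 0.001389 comb 0.010000 user 0.010000 sys 0.000000 (median of 100)

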
# utilities for historical portability


def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )


def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    would break the assumptions of performance measurement in the future.

    This function returns an object with which to (1) assign a new value
    to the attribute and (2) restore the original value of the attribute.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful to
    examine an attribute which isn't guaranteed in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()


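# Usage sketch for safeattrsetter() (illustrative, mirroring what
# gettimer() does above): temporarily replace an attribute, then restore
# the original value once the benchmark is done.
#
#   fout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if fout:
#       fout.set(ui.ferr)  # redirect output for the measurement
#       ...
#       fout.restore()     # put the original stream back

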
# utilities to examine internal API changes


def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )


def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')


def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')


def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")


# utilities to clear cache


def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)


def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')


# perf commands


@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()


@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()


@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data is preserved between calls.

    By default, only the status of the tracked files is requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()


@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()


def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None


@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()


def _default_clear_on_disk_tags_cache(repo):
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))


def _default_clear_on_disk_tags_fnodes_cache(repo):
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)


@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache',
            False,
            b'clear on disk file node cache (DESTRUCTIVE)',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situations

    The options marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    affecting performance after the command has run. However, they do not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache']

    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    clear_fnodes_fn = getattr(
        tags,
        "clear_cache_fnodes",
        _default_clear_on_disk_tags_fnodes_cache,
    )

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()


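# Illustrative invocations of the extended command; the new flags let the
# benchmark start from a cold on-disk cache instead of a warm one:
#
#   $ hg perf::tags
#   $ hg perf::tags --clear-revlogs
#   $ hg perf::tags --clear-on-disk-cache   # unlink the on-disk tags cache
#   $ hg perf::tags --clear-fnode-cache     # unlink the fnodes cache file
#
# The --clear-* variants remove cache files that Mercurial rebuilds on the
# next run (hence the DESTRUCTIVE marker); repository data is untouched.

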
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()


@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()


@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()


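# Illustrative run (numbers are made up, and revision 4242 is hypothetical):
# timing the delta-base search for a single manifest revision, following the
# `-c|-m|FILE REV` synopsis above:
#
#   $ hg perf::delta-find -m 4242
#   ! wall 0.043210 comb 0.040000 user 0.040000 sys 0.000000 (best of 93)

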
1006 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
1055 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
1007 def perfdiscovery(ui, repo, path, **opts):
1056 def perfdiscovery(ui, repo, path, **opts):
1008 """benchmark discovery between local repo and the peer at given path"""
1057 """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()


@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()


@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"added %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()


@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()


@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()


@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default, benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file paths that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")

    def setup():
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds

    def setup():
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    with repo.wlock():
        timer(d, setup=setup)
    fm.end()


def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)


@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()


@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()


@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()


@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()


@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()


@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
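    # A minimal usage sketch (hypothetical revision):
    #
    #   $ hg perf::manifest tip
    #   $ hg perf::manifest --clear-disk tip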
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()


@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()


@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operations related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()


@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not perform any revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than
    for `--rev 0`. The number of looked up revisions and their order can
    also matter.

    Examples of useful sets to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
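    # Usage sketch, using revsets from the list above (--rev may be
    # repeated to combine sets):
    #
    #   $ hg perf::index --rev tip
    #   $ hg perf::index --rev '-10000:' --rev 0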
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()


@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the number and order of revisions we
    look up can vary. Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookups. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()


@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()


def _find_stream_generator(version):
    """find the proper generator function for this stream version"""
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate(repo):
            entries, bytes, data = generatev1(repo)
            return data

        available[b'v1'] = generate
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)


@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3" or "latest", the default)',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want
    # to measure
    result_holder = [None]

    def setupone():
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration of the initialisation
        result_holder[0] = generate(repo)

    timer(runone, setup=setupone, title=b"load")
    fm.end()


@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2" or "latest", the default)',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want
    # to measure

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration of the initialisation
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()


@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This includes the creation of the repository
    """
2036 # try except to appease check code
2085 # try except to appease check code
2037 msg = b"mercurial too old, missing necessary module: %s"
2086 msg = b"mercurial too old, missing necessary module: %s"
2038 try:
2087 try:
2039 from mercurial import bundle2
2088 from mercurial import bundle2
2040 except ImportError as exc:
2089 except ImportError as exc:
2041 msg %= _bytestr(exc)
2090 msg %= _bytestr(exc)
2042 raise error.Abort(msg)
2091 raise error.Abort(msg)
2043 try:
2092 try:
2044 from mercurial import exchange
2093 from mercurial import exchange
2045 except ImportError as exc:
2094 except ImportError as exc:
2046 msg %= _bytestr(exc)
2095 msg %= _bytestr(exc)
2047 raise error.Abort(msg)
2096 raise error.Abort(msg)
2048 try:
2097 try:
2049 from mercurial import hg
2098 from mercurial import hg
2050 except ImportError as exc:
2099 except ImportError as exc:
2051 msg %= _bytestr(exc)
2100 msg %= _bytestr(exc)
2052 raise error.Abort(msg)
2101 raise error.Abort(msg)
2053 try:
2102 try:
2054 from mercurial import localrepo
2103 from mercurial import localrepo
2055 except ImportError as exc:
2104 except ImportError as exc:
2056 msg %= _bytestr(exc)
2105 msg %= _bytestr(exc)
2057 raise error.Abort(msg)
2106 raise error.Abort(msg)
2058
2107
2059 opts = _byteskwargs(opts)
2108 opts = _byteskwargs(opts)
2060 timer, fm = gettimer(ui, opts)
2109 timer, fm = gettimer(ui, opts)
2061
2110
2062 # deletion of the generator may trigger some cleanup that we do not want to
2111 # deletion of the generator may trigger some cleanup that we do not want to
2063 # measure
2112 # measure
2064 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2113 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2065 raise error.Abort("not a readable file: %s" % filename)
2114 raise error.Abort("not a readable file: %s" % filename)
2066
2115
2067 run_variables = [None, None]
2116 run_variables = [None, None]

    @contextlib.contextmanager
    def context():
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]
        # only pass ui when no srcrepo
        localrepo.createrepository(
            repo.ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(repo.ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()


@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()


@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()


@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        len(cl.read(x)[3])

    timer(d)
    fm.end()


@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()


@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))
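    # d() below replays this fixed pseudo-random edit script, so every run
    # measures exactly the same sequence of linelog.replacelines() calls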

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()


@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()


@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
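        # hg <= 5.8 predates the `radix` argument and still expects an
        # explicit `indexfile`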
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        clearcaches(cl)

    timer(d)
    fm.end()


@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()


@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()


@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
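    # route the rendered output to /dev/null so the benchmark measures
    # template rendering rather than terminal or pager I/O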
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()


def _displaystats(ui, opts, entries, data):
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        nbvalues = len(values)
        values.sort()
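        # the percentile entries below index into the sorted list; e.g. the
        # 90th percentile is the value sitting 90% of the way through it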
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make the stats pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()


@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistics about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command finds (base, p1, p2) triplets relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
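        # a merge may have several candidate ancestors; gather the copy
        # tracing parameters against each of them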
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first four are about renamed files, so count those.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revisions covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'renames from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)


@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistics about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revisions covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)


@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()


@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store

    def d():
        s.fncache._load()

    timer(d)
    fm.end()


@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
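    # each run flags the fncache as dirty again; otherwise write() would
    # short-circuit and there would be nothing left to measure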

    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()


@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()

    def d():
        for p in s.fncache.entries:
            s.encode(p)

    timer(d)
    fm.end()


def _bdiffworker(q, blocks, xdiff, ready, done):
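    # worker protocol: diff pairs are pulled from `q` until a None sentinel
    # arrives, then the thread parks on the `ready` condition until the main
    # thread either queues another batch or sets `done` to shut everything
    # down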
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()


def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)


@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
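        # the queue is primed with one None sentinel per worker so that the
        # q.join() below only returns once every thread has started up and
        # parked itself on the `ready` condition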
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()


@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7, a changeset introducing a
    # critical regression that breaks transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f,
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that concludes the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args
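    # versions outside the window between those two changesets are safe; for
    # the affected range the revlog inlining workaround below is applied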

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline


@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()


@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }
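    # each letter maps to the matching whitespace-handling diff option, so
    # the loop below exercises `hg diff` with -w, -b, -B and -wB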

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = {options[c]: b'1' for c in diffopt}

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
3072
3121
3073
3122
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """
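    # Illustrative invocations (synopsis above: -c|-m|FILE); -c and -m come
    # from `revlogopts` and select the changelog or manifest revlog:
    #
    #   $ hg perf::revlogindex -c    # changelog index
    #   $ hg perf::revlogindex -m    # manifest index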

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

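    # The first four bytes of the index hold the revlog format version in
    # the low 16 bits, with feature flags above them; for version 1,
    # bit 16 marks inline data (parsed below).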
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
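    # Illustrative invocations (flags defined above):
    #
    #   $ hg perf::revlogrevisions -m --dist 10    # every 10th manifest rev
    #   $ hg perf::revlogrevisions -c --reverse    # changelog, tip towards 0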
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old Mercurial versions don't support passing an int here.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()


@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revision tested'),
        (b'', b'source', b'full', b'the kind of data fed into the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled, use --count instead
    """
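    # Illustrative invocations (flags defined above):
    #
    #   $ hg perf::revlogwrite -m --source parent-smallest
    #   $ hg perf::revlogwrite -c --startrev=-1000 --count 5 --details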
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many floats will not be very precise; we ignore this
    # fact for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()


class _faketr:
    def add(self, x, y, z=None):
        return None


def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings


def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )


@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('instantiating revlog from the temporary copy\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)


@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
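    # Illustrative invocations (flags defined above; engine availability
    # depends on how Mercurial was built):
    #
    #   $ hg perf::revlogchunks -c
    #   $ hg perf::revlogchunks -m --engines 'zlib,zstd' --startrev 1000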
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
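    # Illustrative invocations (synopsis above: -c|-m|FILE REV):
    #
    #   $ hg perf::revlogrevision -m 1000           # manifest revision 1000
    #   $ hg perf::revlogrevision path/to/file 42 --cache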
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()


@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of rebuilding
    the volatile revision-set caches on revset execution. The volatile caches
    hold data related to filtered and obsolete revisions."""
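    # Illustrative invocations:
    #
    #   $ hg perf::revset 'draft()'
    #   $ hg perf::revset '::tip' --contexts --clear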
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()


@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets are the computed elements related to filtering and
    obsolescence."""
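    # Illustrative invocations; set names match entries in
    # obsolete.cachefuncs and repoview.filtertable (availability varies
    # across Mercurial versions):
    #
    #   $ hg perf::volatilesets
    #   $ hg perf::volatilesets obsolete --clear-obsstore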
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()


@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'include the build time of subsets'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
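    # Illustrative invocations; filter names come from repoview.filtertable
    # (e.g. 'visible', 'served'):
    #
    #   $ hg perf::branchmap
    #   $ hg perf::branchmap visible --full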
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # order the filters from the smaller subsets to the bigger ones
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()


@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear caches between each run'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

       # update for the one last revision
       $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

       # update for a change coming with a new branch
       $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
4052 from mercurial import branchmap
4101 from mercurial import branchmap
4053 from mercurial import repoview
4102 from mercurial import repoview
4054
4103
4055 opts = _byteskwargs(opts)
4104 opts = _byteskwargs(opts)
4056 timer, fm = gettimer(ui, opts)
4105 timer, fm = gettimer(ui, opts)
4057 clearcaches = opts[b'clear_caches']
4106 clearcaches = opts[b'clear_caches']
4058 unfi = repo.unfiltered()
4107 unfi = repo.unfiltered()
4059 x = [None] # used to pass data between closure
4108 x = [None] # used to pass data between closure
4060
4109
4061 # we use a `list` here to avoid possible side effects from smartset
4110 # we use a `list` here to avoid possible side effects from smartset
4062 baserevs = list(scmutil.revrange(repo, base))
4111 baserevs = list(scmutil.revrange(repo, base))
4063 targetrevs = list(scmutil.revrange(repo, target))
4112 targetrevs = list(scmutil.revrange(repo, target))
4064 if not baserevs:
4113 if not baserevs:
4065 raise error.Abort(b'no revisions selected for --base')
4114 raise error.Abort(b'no revisions selected for --base')
4066 if not targetrevs:
4115 if not targetrevs:
4067 raise error.Abort(b'no revisions selected for --target')
4116 raise error.Abort(b'no revisions selected for --target')
4068
4117
4069 # make sure the target branchmap also contains the one in the base
4118 # make sure the target branchmap also contains the one in the base
4070 targetrevs = list(set(baserevs) | set(targetrevs))
4119 targetrevs = list(set(baserevs) | set(targetrevs))
4071 targetrevs.sort()
4120 targetrevs.sort()
4072
4121
4073 cl = repo.changelog
4122 cl = repo.changelog
4074 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4123 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4075 allbaserevs.sort()
4124 allbaserevs.sort()
4076 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4125 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4077
4126
4078 newrevs = list(alltargetrevs.difference(allbaserevs))
4127 newrevs = list(alltargetrevs.difference(allbaserevs))
4079 newrevs.sort()
4128 newrevs.sort()
4080
4129
4081 allrevs = frozenset(unfi.changelog.revs())
4130 allrevs = frozenset(unfi.changelog.revs())
4082 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4131 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4083 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4132 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4084
4133
4085 def basefilter(repo, visibilityexceptions=None):
4134 def basefilter(repo, visibilityexceptions=None):
4086 return basefilterrevs
4135 return basefilterrevs
4087
4136
4088 def targetfilter(repo, visibilityexceptions=None):
4137 def targetfilter(repo, visibilityexceptions=None):
4089 return targetfilterrevs
4138 return targetfilterrevs
4090
4139
4091 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4140 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4092 ui.status(msg % (len(allbaserevs), len(newrevs)))
4141 ui.status(msg % (len(allbaserevs), len(newrevs)))
4093 if targetfilterrevs:
4142 if targetfilterrevs:
4094 msg = b'(%d revisions still filtered)\n'
4143 msg = b'(%d revisions still filtered)\n'
4095 ui.status(msg % len(targetfilterrevs))
4144 ui.status(msg % len(targetfilterrevs))
4096
4145
4097 try:
4146 try:
4098 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4147 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4099 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4148 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4100
4149
4101 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4150 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4102 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4151 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4103
4152
4104 # try to find an existing branchmap to reuse
4153 # try to find an existing branchmap to reuse
4105 subsettable = getbranchmapsubsettable()
4154 subsettable = getbranchmapsubsettable()
4106 candidatefilter = subsettable.get(None)
4155 candidatefilter = subsettable.get(None)
4107 while candidatefilter is not None:
4156 while candidatefilter is not None:
4108 candidatebm = repo.filtered(candidatefilter).branchmap()
4157 candidatebm = repo.filtered(candidatefilter).branchmap()
4109 if candidatebm.validfor(baserepo):
4158 if candidatebm.validfor(baserepo):
4110 filtered = repoview.filterrevs(repo, candidatefilter)
4159 filtered = repoview.filterrevs(repo, candidatefilter)
4111 missing = [r for r in allbaserevs if r in filtered]
4160 missing = [r for r in allbaserevs if r in filtered]
4112 base = candidatebm.copy()
4161 base = candidatebm.copy()
4113 base.update(baserepo, missing)
4162 base.update(baserepo, missing)
4114 break
4163 break
4115 candidatefilter = subsettable.get(candidatefilter)
4164 candidatefilter = subsettable.get(candidatefilter)
4116 else:
4165 else:
4117 # no suitable subset was found
4166 # no suitable subset was found
4118 base = branchmap.branchcache()
4167 base = branchmap.branchcache()
4119 base.update(baserepo, allbaserevs)
4168 base.update(baserepo, allbaserevs)
4120
4169
4121 def setup():
4170 def setup():
4122 x[0] = base.copy()
4171 x[0] = base.copy()
4123 if clearcaches:
4172 if clearcaches:
4124 unfi._revbranchcache = None
4173 unfi._revbranchcache = None
4125 clearchangelog(repo)
4174 clearchangelog(repo)
4126
4175
4127 def bench():
4176 def bench():
4128 x[0].update(targetrepo, newrevs)
4177 x[0].update(targetrepo, newrevs)
4129
4178
4130 timer(bench, setup=setup)
4179 timer(bench, setup=setup)
4131 fm.end()
4180 fm.end()
4132 finally:
4181 finally:
4133 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4182 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4134 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4183 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4135
4184
4136
4185
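# A minimal sketch (not part of the benchmark) of the repoview trick used
# above: registering a throwaway entry in `repoview.filtertable` lets
# `repo.filtered(name)` expose an arbitrary subset of revisions. The filter
# name and helper below are illustrative assumptions.
from mercurial import repoview


def count_visible(repo, revs_to_hide):
    hidden = frozenset(revs_to_hide)

    def examplefilter(repo, visibilityexceptions=None):
        # a repoview filter returns the set of revisions to *hide*
        return hidden

    repoview.filtertable[b'__example_hidden'] = examplefilter
    try:
        view = repo.filtered(b'__example_hidden')
        return len(view.revs(b'all()'))
    finally:
        # clean up, exactly as the benchmark's finally block does
        repoview.filtertable.pop(b'__example_hidden', None)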
4137 @command(
4186 @command(
4138 b'perf::branchmapload|perfbranchmapload',
4187 b'perf::branchmapload|perfbranchmapload',
4139 [
4188 [
4140 (b'f', b'filter', b'', b'Specify repoview filter'),
4189 (b'f', b'filter', b'', b'Specify repoview filter'),
4141 (b'', b'list', False, b'List branchmap filter caches'),
4190 (b'', b'list', False, b'List branchmap filter caches'),
4142 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4191 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4143 ]
4192 ]
4144 + formatteropts,
4193 + formatteropts,
4145 )
4194 )
4146 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4195 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4147 """benchmark reading the branchmap"""
4196 """benchmark reading the branchmap"""
4148 opts = _byteskwargs(opts)
4197 opts = _byteskwargs(opts)
4149 clearrevlogs = opts[b'clear_revlogs']
4198 clearrevlogs = opts[b'clear_revlogs']
4150
4199
4151 if list:
4200 if list:
4152 for name, kind, st in repo.cachevfs.readdir(stat=True):
4201 for name, kind, st in repo.cachevfs.readdir(stat=True):
4153 if name.startswith(b'branch2'):
4202 if name.startswith(b'branch2'):
4154 filtername = name.partition(b'-')[2] or b'unfiltered'
4203 filtername = name.partition(b'-')[2] or b'unfiltered'
4155 ui.status(
4204 ui.status(
4156 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4205 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4157 )
4206 )
4158 return
4207 return
4159 if not filter:
4208 if not filter:
4160 filter = None
4209 filter = None
4161 subsettable = getbranchmapsubsettable()
4210 subsettable = getbranchmapsubsettable()
4162 if filter is None:
4211 if filter is None:
4163 repo = repo.unfiltered()
4212 repo = repo.unfiltered()
4164 else:
4213 else:
4165 repo = repoview.repoview(repo, filter)
4214 repo = repoview.repoview(repo, filter)
4166
4215
4167 repo.branchmap() # make sure we have a relevant, up to date branchmap
4216 repo.branchmap() # make sure we have a relevant, up to date branchmap
4168
4217
4169 try:
4218 try:
4170 fromfile = branchmap.branchcache.fromfile
4219 fromfile = branchmap.branchcache.fromfile
4171 except AttributeError:
4220 except AttributeError:
4172 # older versions
4221 # older versions
4173 fromfile = branchmap.read
4222 fromfile = branchmap.read
4174
4223
4175 currentfilter = filter
4224 currentfilter = filter
4176 # try once without timer, the filter may not be cached
4225 # try once without timer, the filter may not be cached
4177 while fromfile(repo) is None:
4226 while fromfile(repo) is None:
4178 currentfilter = subsettable.get(currentfilter)
4227 currentfilter = subsettable.get(currentfilter)
4179 if currentfilter is None:
4228 if currentfilter is None:
4180 raise error.Abort(
4229 raise error.Abort(
4181 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4230 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4182 )
4231 )
4183 repo = repo.filtered(currentfilter)
4232 repo = repo.filtered(currentfilter)
4184 timer, fm = gettimer(ui, opts)
4233 timer, fm = gettimer(ui, opts)
4185
4234
4186 def setup():
4235 def setup():
4187 if clearrevlogs:
4236 if clearrevlogs:
4188 clearchangelog(repo)
4237 clearchangelog(repo)
4189
4238
4190 def bench():
4239 def bench():
4191 fromfile(repo)
4240 fromfile(repo)
4192
4241
4193 timer(bench, setup=setup)
4242 timer(bench, setup=setup)
4194 fm.end()
4243 fm.end()
4195
4244
4196
4245
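# A sketch of the `subsettable` fallback walk that both branchmap benchmarks
# rely on: each filter name maps to the next filter whose (larger) subset
# could seed a branchmap, so walking the chain finds the nearest view with an
# on-disk cache. `fromfile` is assumed to behave like
# branchmap.branchcache.fromfile, returning None when nothing is cached.
def nearest_cached_branchmap(repo, subsettable, fromfile, filtername):
    current = filtername
    view = repo.unfiltered() if current is None else repo.filtered(current)
    while fromfile(view) is None:
        current = subsettable.get(current)
        if current is None:
            # nothing cached anywhere along the chain
            return None
        view = repo.filtered(current)
    return view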
4197 @command(b'perf::loadmarkers|perfloadmarkers')
4246 @command(b'perf::loadmarkers|perfloadmarkers')
4198 def perfloadmarkers(ui, repo):
4247 def perfloadmarkers(ui, repo):
4199 """benchmark the time to parse the on-disk markers for a repo
4248 """benchmark the time to parse the on-disk markers for a repo
4200
4249
4201 Result is the number of markers in the repo."""
4250 Result is the number of markers in the repo."""
4202 timer, fm = gettimer(ui)
4251 timer, fm = gettimer(ui)
4203 svfs = getsvfs(repo)
4252 svfs = getsvfs(repo)
4204 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4253 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4205 fm.end()
4254 fm.end()
4206
4255
4207
4256
4208 @command(
4257 @command(
4209 b'perf::lrucachedict|perflrucachedict',
4258 b'perf::lrucachedict|perflrucachedict',
4210 formatteropts
4259 formatteropts
4211 + [
4260 + [
4212 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4261 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4213 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4262 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4214 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4263 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4215 (b'', b'size', 4, b'size of cache'),
4264 (b'', b'size', 4, b'size of cache'),
4216 (b'', b'gets', 10000, b'number of key lookups'),
4265 (b'', b'gets', 10000, b'number of key lookups'),
4217 (b'', b'sets', 10000, b'number of key sets'),
4266 (b'', b'sets', 10000, b'number of key sets'),
4218 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4267 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4219 (
4268 (
4220 b'',
4269 b'',
4221 b'mixedgetfreq',
4270 b'mixedgetfreq',
4222 50,
4271 50,
4223 b'frequency of get vs set ops in mixed mode',
4272 b'frequency of get vs set ops in mixed mode',
4224 ),
4273 ),
4225 ],
4274 ],
4226 norepo=True,
4275 norepo=True,
4227 )
4276 )
4228 def perflrucache(
4277 def perflrucache(
4229 ui,
4278 ui,
4230 mincost=0,
4279 mincost=0,
4231 maxcost=100,
4280 maxcost=100,
4232 costlimit=0,
4281 costlimit=0,
4233 size=4,
4282 size=4,
4234 gets=10000,
4283 gets=10000,
4235 sets=10000,
4284 sets=10000,
4236 mixed=10000,
4285 mixed=10000,
4237 mixedgetfreq=50,
4286 mixedgetfreq=50,
4238 **opts
4287 **opts
4239 ):
4288 ):
4240 opts = _byteskwargs(opts)
4289 opts = _byteskwargs(opts)
4241
4290
4242 def doinit():
4291 def doinit():
4243 for i in _xrange(10000):
4292 for i in _xrange(10000):
4244 util.lrucachedict(size)
4293 util.lrucachedict(size)
4245
4294
4246 costrange = list(range(mincost, maxcost + 1))
4295 costrange = list(range(mincost, maxcost + 1))
4247
4296
4248 values = []
4297 values = []
4249 for i in _xrange(size):
4298 for i in _xrange(size):
4250 values.append(random.randint(0, _maxint))
4299 values.append(random.randint(0, _maxint))
4251
4300
4252 # Get mode fills the cache and tests raw lookup performance with no
4301 # Get mode fills the cache and tests raw lookup performance with no
4253 # eviction.
4302 # eviction.
4254 getseq = []
4303 getseq = []
4255 for i in _xrange(gets):
4304 for i in _xrange(gets):
4256 getseq.append(random.choice(values))
4305 getseq.append(random.choice(values))
4257
4306
4258 def dogets():
4307 def dogets():
4259 d = util.lrucachedict(size)
4308 d = util.lrucachedict(size)
4260 for v in values:
4309 for v in values:
4261 d[v] = v
4310 d[v] = v
4262 for key in getseq:
4311 for key in getseq:
4263 value = d[key]
4312 value = d[key]
4264 value # silence pyflakes warning
4313 value # silence pyflakes warning
4265
4314
4266 def dogetscost():
4315 def dogetscost():
4267 d = util.lrucachedict(size, maxcost=costlimit)
4316 d = util.lrucachedict(size, maxcost=costlimit)
4268 for i, v in enumerate(values):
4317 for i, v in enumerate(values):
4269 d.insert(v, v, cost=costs[i])
4318 d.insert(v, v, cost=costs[i])
4270 for key in getseq:
4319 for key in getseq:
4271 try:
4320 try:
4272 value = d[key]
4321 value = d[key]
4273 value # silence pyflakes warning
4322 value # silence pyflakes warning
4274 except KeyError:
4323 except KeyError:
4275 pass
4324 pass
4276
4325
4277 # Set mode tests insertion speed with cache eviction.
4326 # Set mode tests insertion speed with cache eviction.
4278 setseq = []
4327 setseq = []
4279 costs = []
4328 costs = []
4280 for i in _xrange(sets):
4329 for i in _xrange(sets):
4281 setseq.append(random.randint(0, _maxint))
4330 setseq.append(random.randint(0, _maxint))
4282 costs.append(random.choice(costrange))
4331 costs.append(random.choice(costrange))
4283
4332
4284 def doinserts():
4333 def doinserts():
4285 d = util.lrucachedict(size)
4334 d = util.lrucachedict(size)
4286 for v in setseq:
4335 for v in setseq:
4287 d.insert(v, v)
4336 d.insert(v, v)
4288
4337
4289 def doinsertscost():
4338 def doinsertscost():
4290 d = util.lrucachedict(size, maxcost=costlimit)
4339 d = util.lrucachedict(size, maxcost=costlimit)
4291 for i, v in enumerate(setseq):
4340 for i, v in enumerate(setseq):
4292 d.insert(v, v, cost=costs[i])
4341 d.insert(v, v, cost=costs[i])
4293
4342
4294 def dosets():
4343 def dosets():
4295 d = util.lrucachedict(size)
4344 d = util.lrucachedict(size)
4296 for v in setseq:
4345 for v in setseq:
4297 d[v] = v
4346 d[v] = v
4298
4347
4299 # Mixed mode randomly performs gets and sets with eviction.
4348 # Mixed mode randomly performs gets and sets with eviction.
4300 mixedops = []
4349 mixedops = []
4301 for i in _xrange(mixed):
4350 for i in _xrange(mixed):
4302 r = random.randint(0, 100)
4351 r = random.randint(0, 100)
4303 if r < mixedgetfreq:
4352 if r < mixedgetfreq:
4304 op = 0
4353 op = 0
4305 else:
4354 else:
4306 op = 1
4355 op = 1
4307
4356
4308 mixedops.append(
4357 mixedops.append(
4309 (op, random.randint(0, size * 2), random.choice(costrange))
4358 (op, random.randint(0, size * 2), random.choice(costrange))
4310 )
4359 )
4311
4360
4312 def domixed():
4361 def domixed():
4313 d = util.lrucachedict(size)
4362 d = util.lrucachedict(size)
4314
4363
4315 for op, v, cost in mixedops:
4364 for op, v, cost in mixedops:
4316 if op == 0:
4365 if op == 0:
4317 try:
4366 try:
4318 d[v]
4367 d[v]
4319 except KeyError:
4368 except KeyError:
4320 pass
4369 pass
4321 else:
4370 else:
4322 d[v] = v
4371 d[v] = v
4323
4372
4324 def domixedcost():
4373 def domixedcost():
4325 d = util.lrucachedict(size, maxcost=costlimit)
4374 d = util.lrucachedict(size, maxcost=costlimit)
4326
4375
4327 for op, v, cost in mixedops:
4376 for op, v, cost in mixedops:
4328 if op == 0:
4377 if op == 0:
4329 try:
4378 try:
4330 d[v]
4379 d[v]
4331 except KeyError:
4380 except KeyError:
4332 pass
4381 pass
4333 else:
4382 else:
4334 d.insert(v, v, cost=cost)
4383 d.insert(v, v, cost=cost)
4335
4384
4336 benches = [
4385 benches = [
4337 (doinit, b'init'),
4386 (doinit, b'init'),
4338 ]
4387 ]
4339
4388
4340 if costlimit:
4389 if costlimit:
4341 benches.extend(
4390 benches.extend(
4342 [
4391 [
4343 (dogetscost, b'gets w/ cost limit'),
4392 (dogetscost, b'gets w/ cost limit'),
4344 (doinsertscost, b'inserts w/ cost limit'),
4393 (doinsertscost, b'inserts w/ cost limit'),
4345 (domixedcost, b'mixed w/ cost limit'),
4394 (domixedcost, b'mixed w/ cost limit'),
4346 ]
4395 ]
4347 )
4396 )
4348 else:
4397 else:
4349 benches.extend(
4398 benches.extend(
4350 [
4399 [
4351 (dogets, b'gets'),
4400 (dogets, b'gets'),
4352 (doinserts, b'inserts'),
4401 (doinserts, b'inserts'),
4353 (dosets, b'sets'),
4402 (dosets, b'sets'),
4354 (domixed, b'mixed'),
4403 (domixed, b'mixed'),
4355 ]
4404 ]
4356 )
4405 )
4357
4406
4358 for fn, title in benches:
4407 for fn, title in benches:
4359 timer, fm = gettimer(ui, opts)
4408 timer, fm = gettimer(ui, opts)
4360 timer(fn, title=title)
4409 timer(fn, title=title)
4361 fm.end()
4410 fm.end()
4362
4411
4363
4412
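# A small usage sketch for the util.lrucachedict API exercised above: plain
# `d[k] = v` assignment, `d.insert(k, v, cost=...)` for cost-aware caches,
# and KeyError once an entry has been evicted. The keys, values and costs
# are illustrative.
from mercurial import util

d = util.lrucachedict(4, maxcost=100)
d.insert(b'a', b'payload-a', cost=60)
d.insert(b'b', b'payload-b', cost=60)  # total cost 120 > 100: evicts b'a'
try:
    d[b'a']
except KeyError:
    pass  # expected: b'a' was the least recently used entry
assert d[b'b'] == b'payload-b'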
4364 @command(
4413 @command(
4365 b'perf::write|perfwrite',
4414 b'perf::write|perfwrite',
4366 formatteropts
4415 formatteropts
4367 + [
4416 + [
4368 (b'', b'write-method', b'write', b'ui write method'),
4417 (b'', b'write-method', b'write', b'ui write method'),
4369 (b'', b'nlines', 100, b'number of lines'),
4418 (b'', b'nlines', 100, b'number of lines'),
4370 (b'', b'nitems', 100, b'number of items (per line)'),
4419 (b'', b'nitems', 100, b'number of items (per line)'),
4371 (b'', b'item', b'x', b'item that is written'),
4420 (b'', b'item', b'x', b'item that is written'),
4372 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4421 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4373 (b'', b'flush-line', None, b'flush after each line'),
4422 (b'', b'flush-line', None, b'flush after each line'),
4374 ],
4423 ],
4375 )
4424 )
4376 def perfwrite(ui, repo, **opts):
4425 def perfwrite(ui, repo, **opts):
4377 """microbenchmark ui.write (and others)"""
4426 """microbenchmark ui.write (and others)"""
4378 opts = _byteskwargs(opts)
4427 opts = _byteskwargs(opts)
4379
4428
4380 write = getattr(ui, _sysstr(opts[b'write_method']))
4429 write = getattr(ui, _sysstr(opts[b'write_method']))
4381 nlines = int(opts[b'nlines'])
4430 nlines = int(opts[b'nlines'])
4382 nitems = int(opts[b'nitems'])
4431 nitems = int(opts[b'nitems'])
4383 item = opts[b'item']
4432 item = opts[b'item']
4384 batch_line = opts.get(b'batch_line')
4433 batch_line = opts.get(b'batch_line')
4385 flush_line = opts.get(b'flush_line')
4434 flush_line = opts.get(b'flush_line')
4386
4435
4387 if batch_line:
4436 if batch_line:
4388 line = item * nitems + b'\n'
4437 line = item * nitems + b'\n'
4389
4438
4390 def benchmark():
4439 def benchmark():
4391 for i in pycompat.xrange(nlines):
4440 for i in pycompat.xrange(nlines):
4392 if batch_line:
4441 if batch_line:
4393 write(line)
4442 write(line)
4394 else:
4443 else:
4395 for i in pycompat.xrange(nitems):
4444 for i in pycompat.xrange(nitems):
4396 write(item)
4445 write(item)
4397 write(b'\n')
4446 write(b'\n')
4398 if flush_line:
4447 if flush_line:
4399 ui.flush()
4448 ui.flush()
4400 ui.flush()
4449 ui.flush()
4401
4450
4402 timer, fm = gettimer(ui, opts)
4451 timer, fm = gettimer(ui, opts)
4403 timer(benchmark)
4452 timer(benchmark)
4404 fm.end()
4453 fm.end()
4405
4454
4406
4455
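# Why `--batch-line` matters, sketched with an io.BytesIO stand-in for the
# ui method: one write per line amortizes the per-call overhead that
# dominates when emitting many one-byte items. The helper below is an
# illustration, not part of the benchmark.
import io


def render(nlines, nitems, item=b'x', batch_line=True):
    out = io.BytesIO()
    for _ in range(nlines):
        if batch_line:
            out.write(item * nitems + b'\n')  # one call per line
        else:
            for _ in range(nitems):
                out.write(item)  # one call per item
            out.write(b'\n')
    return out.getvalue()


assert render(2, 3) == b'xxx\nxxx\n'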
4407 def uisetup(ui):
4456 def uisetup(ui):
4408 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4457 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4409 commands, b'debugrevlogopts'
4458 commands, b'debugrevlogopts'
4410 ):
4459 ):
4411 # for "historical portability":
4460 # for "historical portability":
4412 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4461 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4413 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4462 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4414 # openrevlog() should cause failure, because it has been
4463 # openrevlog() should cause failure, because it has been
4415 # available since 3.5 (or 49c583ca48c4).
4464 # available since 3.5 (or 49c583ca48c4).
4416 def openrevlog(orig, repo, cmd, file_, opts):
4465 def openrevlog(orig, repo, cmd, file_, opts):
4417 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4466 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4418 raise error.Abort(
4467 raise error.Abort(
4419 b"This version doesn't support --dir option",
4468 b"This version doesn't support --dir option",
4420 hint=b"use 3.5 or later",
4469 hint=b"use 3.5 or later",
4421 )
4470 )
4422 return orig(repo, cmd, file_, opts)
4471 return orig(repo, cmd, file_, opts)
4423
4472
4424 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4473 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4425
4474
4426
4475
4427 @command(
4476 @command(
4428 b'perf::progress|perfprogress',
4477 b'perf::progress|perfprogress',
4429 formatteropts
4478 formatteropts
4430 + [
4479 + [
4431 (b'', b'topic', b'topic', b'topic for progress messages'),
4480 (b'', b'topic', b'topic', b'topic for progress messages'),
4432 (b'c', b'total', 1000000, b'total value we are progressing to'),
4481 (b'c', b'total', 1000000, b'total value we are progressing to'),
4433 ],
4482 ],
4434 norepo=True,
4483 norepo=True,
4435 )
4484 )
4436 def perfprogress(ui, topic=None, total=None, **opts):
4485 def perfprogress(ui, topic=None, total=None, **opts):
4437 """printing of progress bars"""
4486 """printing of progress bars"""
4438 opts = _byteskwargs(opts)
4487 opts = _byteskwargs(opts)
4439
4488
4440 timer, fm = gettimer(ui, opts)
4489 timer, fm = gettimer(ui, opts)
4441
4490
4442 def doprogress():
4491 def doprogress():
4443 with ui.makeprogress(topic, total=total) as progress:
4492 with ui.makeprogress(topic, total=total) as progress:
4444 for i in _xrange(total):
4493 for i in _xrange(total):
4445 progress.increment()
4494 progress.increment()
4446
4495
4447 timer(doprogress)
4496 timer(doprogress)
4448 fm.end()
4497 fm.end()
@@ -1,912 +1,922 b''
1 # tags.py - read tag info from local repository
1 # tags.py - read tag info from local repository
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 # Currently this module only deals with reading and caching tags.
9 # Currently this module only deals with reading and caching tags.
10 # Eventually, it could take care of updating (adding/removing/moving)
10 # Eventually, it could take care of updating (adding/removing/moving)
11 # tags too.
11 # tags too.
12
12
13
13
14 import binascii
14 import binascii
15 import io
15 import io
16
16
17 from .node import (
17 from .node import (
18 bin,
18 bin,
19 hex,
19 hex,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 )
22 )
23 from .i18n import _
23 from .i18n import _
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 match as matchmod,
27 match as matchmod,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31 from .utils import stringutil
31 from .utils import stringutil
32
32
33 # Tags computation can be expensive and caches exist to make it fast in
33 # Tags computation can be expensive and caches exist to make it fast in
34 # the common case.
34 # the common case.
35 #
35 #
36 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
36 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
37 # each revision in the repository. The file is effectively an array of
37 # each revision in the repository. The file is effectively an array of
38 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
38 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
39 # details.
39 # details.
40 #
40 #
41 # The .hgtags filenode cache grows in proportion to the length of the
41 # The .hgtags filenode cache grows in proportion to the length of the
42 # changelog. The file is truncated when the changelog is stripped.
42 # changelog. The file is truncated when the changelog is stripped.
43 #
43 #
44 # The purpose of the filenode cache is to avoid the most expensive part
44 # The purpose of the filenode cache is to avoid the most expensive part
45 # of finding global tags, which is looking up the .hgtags filenode in the
45 # of finding global tags, which is looking up the .hgtags filenode in the
46 # manifest for each head. This can take dozens of milliseconds or over
46 # manifest for each head. This can take dozens of milliseconds or over
47 # 100ms for repositories with very large manifests. Multiplied by dozens
47 # 100ms for repositories with very large manifests. Multiplied by dozens
48 # or even hundreds of heads, this becomes a significant performance concern.
48 # or even hundreds of heads, this becomes a significant performance concern.
49 #
49 #
50 # There also exists a separate cache file for each repository filter.
50 # There also exists a separate cache file for each repository filter.
51 # These "tags-*" files store information about the history of tags.
51 # These "tags-*" files store information about the history of tags.
52 #
52 #
53 # Each tags cache file consists of a cache validation line followed by
53 # Each tags cache file consists of a cache validation line followed by
54 # a history of tags.
54 # a history of tags.
55 #
55 #
56 # The cache validation line has the format:
56 # The cache validation line has the format:
57 #
57 #
58 # <tiprev> <tipnode> [<filteredhash>]
58 # <tiprev> <tipnode> [<filteredhash>]
59 #
59 #
60 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
60 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
61 # node for that changeset. These redundantly identify the repository
61 # node for that changeset. These redundantly identify the repository
62 # tip from the time the cache was written. In addition, <filteredhash>,
62 # tip from the time the cache was written. In addition, <filteredhash>,
63 # if present, is a 40 character hex hash of the contents of the filtered
63 # if present, is a 40 character hex hash of the contents of the filtered
64 # revisions for this filter. If the set of filtered revs changes, the
64 # revisions for this filter. If the set of filtered revs changes, the
65 # hash will change and invalidate the cache.
65 # hash will change and invalidate the cache.
66 #
66 #
67 # The history part of the tags cache consists of lines of the form:
67 # The history part of the tags cache consists of lines of the form:
68 #
68 #
69 # <node> <tag>
69 # <node> <tag>
70 #
70 #
71 # (This format is identical to that of .hgtags files.)
71 # (This format is identical to that of .hgtags files.)
72 #
72 #
73 # <tag> is the tag name and <node> is the 40 character hex changeset
73 # <tag> is the tag name and <node> is the 40 character hex changeset
74 # the tag is associated with.
74 # the tag is associated with.
75 #
75 #
76 # Tags are written sorted by tag name.
76 # Tags are written sorted by tag name.
77 #
77 #
78 # Tags associated with multiple changesets have an entry for each changeset.
78 # Tags associated with multiple changesets have an entry for each changeset.
79 # The most recent changeset (in terms of revlog ordering for the head
79 # The most recent changeset (in terms of revlog ordering for the head
80 # setting it) for each tag is last.
80 # setting it) for each tag is last.
81
81
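# A sketch of parsing the validation line documented above. The input is a
# fabricated example in the "<tiprev> <tipnode> [<filteredhash>]" format;
# the real reader lives in _readtagcache() below.
import binascii


def parse_validline(line):
    fields = line.split()
    tiprev = int(fields[0])
    tipnode = binascii.unhexlify(fields[1])
    filteredhash = binascii.unhexlify(fields[2]) if len(fields) > 2 else None
    return tiprev, tipnode, filteredhash


tiprev, tipnode, fhash = parse_validline(b'42 ' + b'ab' * 20)
assert tiprev == 42 and len(tipnode) == 20 and fhash is None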
82
82
83 def fnoderevs(ui, repo, revs):
83 def fnoderevs(ui, repo, revs):
84 """return the list of '.hgtags' fnodes used in a set revisions
84 """return the list of '.hgtags' fnodes used in a set revisions
85
85
86 This is returned as a list of unique fnodes. We use a list instead of a set
86 This is returned as a list of unique fnodes. We use a list instead of a set
87 because order matters when it comes to tags."""
87 because order matters when it comes to tags."""
88 unfi = repo.unfiltered()
88 unfi = repo.unfiltered()
89 tonode = unfi.changelog.node
89 tonode = unfi.changelog.node
90 nodes = [tonode(r) for r in revs]
90 nodes = [tonode(r) for r in revs]
91 fnodes = _getfnodes(ui, repo, nodes)
91 fnodes = _getfnodes(ui, repo, nodes)
92 fnodes = _filterfnodes(fnodes, nodes)
92 fnodes = _filterfnodes(fnodes, nodes)
93 return fnodes
93 return fnodes
94
94
95
95
96 def _nulltonone(repo, value):
96 def _nulltonone(repo, value):
97 """convert nullid to None
97 """convert nullid to None
98
98
99 For a tag value, nullid means "deleted". This small utility function helps
99 For a tag value, nullid means "deleted". This small utility function helps
100 translate that to None."""
100 translate that to None."""
101 if value == repo.nullid:
101 if value == repo.nullid:
102 return None
102 return None
103 return value
103 return value
104
104
105
105
106 def difftags(ui, repo, oldfnodes, newfnodes):
106 def difftags(ui, repo, oldfnodes, newfnodes):
107 """list differences between tags expressed in two set of file-nodes
107 """list differences between tags expressed in two set of file-nodes
108
108
109 The list contains entries in the form: (tagname, oldvalue, new value).
109 The list contains entries in the form: (tagname, oldvalue, new value).
110 None is used to express a missing value:
110 None is used to express a missing value:
111 ('foo', None, 'abcd') is a new tag,
111 ('foo', None, 'abcd') is a new tag,
112 ('bar', 'ef01', None) is a deletion,
112 ('bar', 'ef01', None) is a deletion,
113 ('baz', 'abcd', 'ef01') is a tag movement.
113 ('baz', 'abcd', 'ef01') is a tag movement.
114 """
114 """
115 if oldfnodes == newfnodes:
115 if oldfnodes == newfnodes:
116 return []
116 return []
117 oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
117 oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
118 newtags = _tagsfromfnodes(ui, repo, newfnodes)
118 newtags = _tagsfromfnodes(ui, repo, newfnodes)
119
119
120 # list of (tag, old, new): None means missing
120 # list of (tag, old, new): None means missing
121 entries = []
121 entries = []
122 for tag, (new, __) in newtags.items():
122 for tag, (new, __) in newtags.items():
123 new = _nulltonone(repo, new)
123 new = _nulltonone(repo, new)
124 old, __ = oldtags.pop(tag, (None, None))
124 old, __ = oldtags.pop(tag, (None, None))
125 old = _nulltonone(repo, old)
125 old = _nulltonone(repo, old)
126 if old != new:
126 if old != new:
127 entries.append((tag, old, new))
127 entries.append((tag, old, new))
128 # handle deleted tags
128 # handle deleted tags
129 for tag, (old, __) in oldtags.items():
129 for tag, (old, __) in oldtags.items():
130 old = _nulltonone(repo, old)
130 old = _nulltonone(repo, old)
131 if old is not None:
131 if old is not None:
132 entries.append((tag, old, None))
132 entries.append((tag, old, None))
133 entries.sort()
133 entries.sort()
134 return entries
134 return entries
135
135
136
136
137 def writediff(fp, difflist):
137 def writediff(fp, difflist):
138 """write tags diff information to a file.
138 """write tags diff information to a file.
139
139
140 Data are stored with a line based format:
140 Data are stored with a line based format:
141
141
142 <action> <hex-node> <tag-name>\n
142 <action> <hex-node> <tag-name>\n
143
143
144 Actions are defined as follows:
144 Actions are defined as follows:
145 -R tag is removed,
145 -R tag is removed,
146 +A tag is added,
146 +A tag is added,
147 -M tag is moved (old value),
147 -M tag is moved (old value),
148 +M tag is moved (new value),
148 +M tag is moved (new value),
149
149
150 Example:
150 Example:
151
151
152 +A 875517b4806a848f942811a315a5bce30804ae85 t5
152 +A 875517b4806a848f942811a315a5bce30804ae85 t5
153
153
154 See documentation of difftags output for details about the input.
154 See documentation of difftags output for details about the input.
155 """
155 """
156 add = b'+A %s %s\n'
156 add = b'+A %s %s\n'
157 remove = b'-R %s %s\n'
157 remove = b'-R %s %s\n'
158 updateold = b'-M %s %s\n'
158 updateold = b'-M %s %s\n'
159 updatenew = b'+M %s %s\n'
159 updatenew = b'+M %s %s\n'
160 for tag, old, new in difflist:
160 for tag, old, new in difflist:
161 # translate to hex
161 # translate to hex
162 if old is not None:
162 if old is not None:
163 old = hex(old)
163 old = hex(old)
164 if new is not None:
164 if new is not None:
165 new = hex(new)
165 new = hex(new)
166 # write to file
166 # write to file
167 if old is None:
167 if old is None:
168 fp.write(add % (new, tag))
168 fp.write(add % (new, tag))
169 elif new is None:
169 elif new is None:
170 fp.write(remove % (old, tag))
170 fp.write(remove % (old, tag))
171 else:
171 else:
172 fp.write(updateold % (old, tag))
172 fp.write(updateold % (old, tag))
173 fp.write(updatenew % (new, tag))
173 fp.write(updatenew % (new, tag))
174
174
175
175
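# A usage sketch for writediff(): serializing a difftags-style list of
# (tag, oldnode, newnode) entries into a buffer. The 20-byte node values are
# fabricated for illustration.
import io

from mercurial import tags

old, new = b'\x01' * 20, b'\x02' * 20
buf = io.BytesIO()
tags.writediff(
    buf,
    [
        (b't1', None, new),  # serialized as +A (added)
        (b't2', old, None),  # serialized as -R (removed)
        (b't3', old, new),  # serialized as -M then +M (moved)
    ],
)
assert buf.getvalue().startswith(b'+A ')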
176 def findglobaltags(ui, repo):
176 def findglobaltags(ui, repo):
177 """Find global tags in a repo: return a tagsmap
177 """Find global tags in a repo: return a tagsmap
178
178
179 tagsmap: tag name to (node, hist) 2-tuples.
179 tagsmap: tag name to (node, hist) 2-tuples.
180
180
181 The tags cache is read and updated as a side-effect of calling.
181 The tags cache is read and updated as a side-effect of calling.
182 """
182 """
183 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
183 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
184 if cachetags is not None:
184 if cachetags is not None:
185 assert not shouldwrite
185 assert not shouldwrite
186 # XXX is this really 100% correct? are there oddball special
186 # XXX is this really 100% correct? are there oddball special
187 # cases where a global tag should outrank a local tag but won't,
187 # cases where a global tag should outrank a local tag but won't,
188 # because cachetags does not contain rank info?
188 # because cachetags does not contain rank info?
189 alltags = {}
189 alltags = {}
190 _updatetags(cachetags, alltags)
190 _updatetags(cachetags, alltags)
191 return alltags
191 return alltags
192
192
193 for head in reversed(heads): # oldest to newest
193 for head in reversed(heads): # oldest to newest
194 assert repo.changelog.index.has_node(
194 assert repo.changelog.index.has_node(
195 head
195 head
196 ), b"tag cache returned bogus head %s" % short(head)
196 ), b"tag cache returned bogus head %s" % short(head)
197 fnodes = _filterfnodes(tagfnode, reversed(heads))
197 fnodes = _filterfnodes(tagfnode, reversed(heads))
198 alltags = _tagsfromfnodes(ui, repo, fnodes)
198 alltags = _tagsfromfnodes(ui, repo, fnodes)
199
199
200 # and update the cache (if necessary)
200 # and update the cache (if necessary)
201 if shouldwrite:
201 if shouldwrite:
202 _writetagcache(ui, repo, valid, alltags)
202 _writetagcache(ui, repo, valid, alltags)
203 return alltags
203 return alltags
204
204
205
205
206 def _filterfnodes(tagfnode, nodes):
206 def _filterfnodes(tagfnode, nodes):
207 """return a list of unique fnodes
207 """return a list of unique fnodes
208
208
209 The order of this list matches the order of "nodes". Preserving this order
209 The order of this list matches the order of "nodes". Preserving this order
210 is important, as reading tags in a different order provides different
210 is important, as reading tags in a different order provides different
211 results."""
211 results."""
212 seen = set() # set of fnode
212 seen = set() # set of fnode
213 fnodes = []
213 fnodes = []
214 for no in nodes: # oldest to newest
214 for no in nodes: # oldest to newest
215 fnode = tagfnode.get(no)
215 fnode = tagfnode.get(no)
216 if fnode and fnode not in seen:
216 if fnode and fnode not in seen:
217 seen.add(fnode)
217 seen.add(fnode)
218 fnodes.append(fnode)
218 fnodes.append(fnode)
219 return fnodes
219 return fnodes
220
220
221
221
222 def _tagsfromfnodes(ui, repo, fnodes):
222 def _tagsfromfnodes(ui, repo, fnodes):
223 """return a tagsmap from a list of file-node
223 """return a tagsmap from a list of file-node
224
224
225 tagsmap: tag name to (node, hist) 2-tuples.
225 tagsmap: tag name to (node, hist) 2-tuples.
226
226
227 The order of the list matters."""
227 The order of the list matters."""
228 alltags = {}
228 alltags = {}
229 fctx = None
229 fctx = None
230 for fnode in fnodes:
230 for fnode in fnodes:
231 if fctx is None:
231 if fctx is None:
232 fctx = repo.filectx(b'.hgtags', fileid=fnode)
232 fctx = repo.filectx(b'.hgtags', fileid=fnode)
233 else:
233 else:
234 fctx = fctx.filectx(fnode)
234 fctx = fctx.filectx(fnode)
235 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
235 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
236 _updatetags(filetags, alltags)
236 _updatetags(filetags, alltags)
237 return alltags
237 return alltags
238
238
239
239
240 def readlocaltags(ui, repo, alltags, tagtypes):
240 def readlocaltags(ui, repo, alltags, tagtypes):
241 '''Read local tags in repo. Update alltags and tagtypes.'''
241 '''Read local tags in repo. Update alltags and tagtypes.'''
242 try:
242 try:
243 data = repo.vfs.read(b"localtags")
243 data = repo.vfs.read(b"localtags")
244 except FileNotFoundError:
244 except FileNotFoundError:
245 return
245 return
246
246
247 # localtags is in the local encoding; re-encode to UTF-8 on
247 # localtags is in the local encoding; re-encode to UTF-8 on
248 # input for consistency with the rest of this module.
248 # input for consistency with the rest of this module.
249 filetags = _readtags(
249 filetags = _readtags(
250 ui, repo, data.splitlines(), b"localtags", recode=encoding.fromlocal
250 ui, repo, data.splitlines(), b"localtags", recode=encoding.fromlocal
251 )
251 )
252
252
253 # remove tags pointing to invalid nodes
253 # remove tags pointing to invalid nodes
254 cl = repo.changelog
254 cl = repo.changelog
255 for t in list(filetags):
255 for t in list(filetags):
256 try:
256 try:
257 cl.rev(filetags[t][0])
257 cl.rev(filetags[t][0])
258 except (LookupError, ValueError):
258 except (LookupError, ValueError):
259 del filetags[t]
259 del filetags[t]
260
260
261 _updatetags(filetags, alltags, b'local', tagtypes)
261 _updatetags(filetags, alltags, b'local', tagtypes)
262
262
263
263
264 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
264 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
265 """Read tag definitions from a file (or any source of lines).
265 """Read tag definitions from a file (or any source of lines).
266
266
267 This function returns two sortdicts with similar information:
267 This function returns two sortdicts with similar information:
268
268
269 - the first dict, bintaghist, contains the tag information as expected by
269 - the first dict, bintaghist, contains the tag information as expected by
270 the _readtags function, i.e. a mapping from tag name to (node, hist):
270 the _readtags function, i.e. a mapping from tag name to (node, hist):
271 - node is the node id from the last line read for that name,
271 - node is the node id from the last line read for that name,
272 - hist is the list of node ids previously associated with it (in file
272 - hist is the list of node ids previously associated with it (in file
273 order). All node ids are binary, not hex.
273 order). All node ids are binary, not hex.
274
274
275 - the second dict, hextaglines, is a mapping from tag name to a list of
275 - the second dict, hextaglines, is a mapping from tag name to a list of
276 [hexnode, line number] pairs, ordered from the oldest to the newest node.
276 [hexnode, line number] pairs, ordered from the oldest to the newest node.
277
277
278 When calcnodelines is False the hextaglines dict is not calculated (an
278 When calcnodelines is False the hextaglines dict is not calculated (an
279 empty dict is returned). This is done to improve this function's
279 empty dict is returned). This is done to improve this function's
280 performance in cases where the line numbers are not needed.
280 performance in cases where the line numbers are not needed.
281 """
281 """
282
282
283 bintaghist = util.sortdict()
283 bintaghist = util.sortdict()
284 hextaglines = util.sortdict()
284 hextaglines = util.sortdict()
285 count = 0
285 count = 0
286
286
287 def dbg(msg):
287 def dbg(msg):
288 ui.debug(b"%s, line %d: %s\n" % (fn, count, msg))
288 ui.debug(b"%s, line %d: %s\n" % (fn, count, msg))
289
289
290 for nline, line in enumerate(lines):
290 for nline, line in enumerate(lines):
291 count += 1
291 count += 1
292 if not line:
292 if not line:
293 continue
293 continue
294 try:
294 try:
295 (nodehex, name) = line.split(b" ", 1)
295 (nodehex, name) = line.split(b" ", 1)
296 except ValueError:
296 except ValueError:
297 dbg(b"cannot parse entry")
297 dbg(b"cannot parse entry")
298 continue
298 continue
299 name = name.strip()
299 name = name.strip()
300 if recode:
300 if recode:
301 name = recode(name)
301 name = recode(name)
302 try:
302 try:
303 nodebin = bin(nodehex)
303 nodebin = bin(nodehex)
304 except binascii.Error:
304 except binascii.Error:
305 dbg(b"node '%s' is not well formed" % nodehex)
305 dbg(b"node '%s' is not well formed" % nodehex)
306 continue
306 continue
307
307
308 # update filetags
308 # update filetags
309 if calcnodelines:
309 if calcnodelines:
310 # map tag name to a list of line numbers
310 # map tag name to a list of line numbers
311 if name not in hextaglines:
311 if name not in hextaglines:
312 hextaglines[name] = []
312 hextaglines[name] = []
313 hextaglines[name].append([nodehex, nline])
313 hextaglines[name].append([nodehex, nline])
314 continue
314 continue
315 # map tag name to (node, hist)
315 # map tag name to (node, hist)
316 if name not in bintaghist:
316 if name not in bintaghist:
317 bintaghist[name] = []
317 bintaghist[name] = []
318 bintaghist[name].append(nodebin)
318 bintaghist[name].append(nodebin)
319 return bintaghist, hextaglines
319 return bintaghist, hextaglines
320
320
321
321
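# A runnable sketch of the default (calcnodelines=False) return value, with
# fabricated 40-hex node ids and a minimal ui stand-in; only ui.debug() is
# ever called, and only for malformed lines. _readtaghist is a private
# helper, so this is illustration rather than API.
import binascii

from mercurial import tags


class _StubUI:
    def debug(self, msg):
        pass


lines = [b'a' * 40 + b' t1', b'b' * 40 + b' t1']
binhist, hexlines = tags._readtaghist(_StubUI(), None, lines, b'example')
assert binhist[b't1'] == [
    binascii.unhexlify(b'a' * 40),
    binascii.unhexlify(b'b' * 40),
]
assert not hexlines  # only populated when calcnodelines=True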
322 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
322 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
323 """Read tag definitions from a file (or any source of lines).
323 """Read tag definitions from a file (or any source of lines).
324
324
325 Returns a mapping from tag name to (node, hist).
325 Returns a mapping from tag name to (node, hist).
326
326
327 "node" is the node id from the last line read for that name. "hist"
327 "node" is the node id from the last line read for that name. "hist"
328 is the list of node ids previously associated with it (in file order).
328 is the list of node ids previously associated with it (in file order).
329 All node ids are binary, not hex.
329 All node ids are binary, not hex.
330 """
330 """
331 filetags, nodelines = _readtaghist(
331 filetags, nodelines = _readtaghist(
332 ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines
332 ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines
333 )
333 )
334 # util.sortdict().__setitem__ is much slower at replacing than inserting
334 # util.sortdict().__setitem__ is much slower at replacing than inserting
335 # new entries. The difference can matter if there are thousands of tags.
335 # new entries. The difference can matter if there are thousands of tags.
336 # Create a new sortdict to avoid the performance penalty.
336 # Create a new sortdict to avoid the performance penalty.
337 newtags = util.sortdict()
337 newtags = util.sortdict()
338 for tag, taghist in filetags.items():
338 for tag, taghist in filetags.items():
339 newtags[tag] = (taghist[-1], taghist[:-1])
339 newtags[tag] = (taghist[-1], taghist[:-1])
340 return newtags
340 return newtags
341
341
342
342
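# A rough way to check the sortdict claim above (illustrative, not part of
# the module): replacing an existing key takes a slower path than assignment
# into an empty sortdict, which is append-only.
import timeit

from mercurial import util

keys = [b'%d' % i for i in range(10000)]


def _assign_all(d):
    for k in keys:
        d[k] = k


prefilled = util.sortdict()
_assign_all(prefilled)
print(timeit.timeit(lambda: _assign_all(prefilled), number=10))  # replaces
print(timeit.timeit(lambda: _assign_all(util.sortdict()), number=10))  # inserts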
343 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
343 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
344 """Incorporate the tag info read from one file into dictionnaries
344 """Incorporate the tag info read from one file into dictionnaries
345
345
346 The first one, 'alltags', is a "tagsmap" (see 'findglobaltags' for details).
346 The first one, 'alltags', is a "tagsmap" (see 'findglobaltags' for details).
347
347
348 The second one, 'tagtypes', is optional and will be updated to track the
348 The second one, 'tagtypes', is optional and will be updated to track the
349 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
349 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
350 needs to be set."""
350 needs to be set."""
351 if tagtype is None:
351 if tagtype is None:
352 assert tagtypes is None
352 assert tagtypes is None
353
353
354 for name, nodehist in filetags.items():
354 for name, nodehist in filetags.items():
355 if name not in alltags:
355 if name not in alltags:
356 alltags[name] = nodehist
356 alltags[name] = nodehist
357 if tagtype is not None:
357 if tagtype is not None:
358 tagtypes[name] = tagtype
358 tagtypes[name] = tagtype
359 continue
359 continue
360
360
361 # we prefer alltags[name] if:
361 # we prefer alltags[name] if:
362 # it supersedes us OR
362 # it supersedes us OR
363 # mutual supersedes and it has a higher rank
363 # mutual supersedes and it has a higher rank
364 # otherwise we win because we're tip-most
364 # otherwise we win because we're tip-most
365 anode, ahist = nodehist
365 anode, ahist = nodehist
366 bnode, bhist = alltags[name]
366 bnode, bhist = alltags[name]
367 if (
367 if (
368 bnode != anode
368 bnode != anode
369 and anode in bhist
369 and anode in bhist
370 and (bnode not in ahist or len(bhist) > len(ahist))
370 and (bnode not in ahist or len(bhist) > len(ahist))
371 ):
371 ):
372 anode = bnode
372 anode = bnode
373 elif tagtype is not None:
373 elif tagtype is not None:
374 tagtypes[name] = tagtype
374 tagtypes[name] = tagtype
375 ahist.extend([n for n in bhist if n not in ahist])
375 ahist.extend([n for n in bhist if n not in ahist])
376 alltags[name] = anode, ahist
376 alltags[name] = anode, ahist
377
377
378
378
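# A tiny worked example of the merge rule above, with fabricated binary
# nodes and _updatetags imported for illustration (it is a private helper).
# The incoming value wins because the existing entry does not supersede it.
from mercurial.tags import _updatetags

oldnode, newnode = b'\x01' * 20, b'\x02' * 20
alltags = {b't1': (oldnode, [])}
filetags = {b't1': (newnode, [oldnode])}  # new value; old one in its history
_updatetags(filetags, alltags)
assert alltags[b't1'][0] == newnode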
379 def _filename(repo):
379 def _filename(repo):
380 """name of a tagcache file for a given repo or repoview"""
380 """name of a tagcache file for a given repo or repoview"""
381 filename = b'tags2'
381 filename = b'tags2'
382 if repo.filtername:
382 if repo.filtername:
383 filename = b'%s-%s' % (filename, repo.filtername)
383 filename = b'%s-%s' % (filename, repo.filtername)
384 return filename
384 return filename
385
385
386
386
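# For example (using the standard repoview filter names):
#
# unfiltered repo -> b'tags2'
# repo.filtered(b'visible') -> b'tags2-visible'
# repo.filtered(b'served') -> b'tags2-served'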
387 def _readtagcache(ui, repo):
387 def _readtagcache(ui, repo):
388 """Read the tag cache.
388 """Read the tag cache.
389
389
390 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
390 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
391
391
392 If the cache is completely up-to-date, "cachetags" is a dict of the
392 If the cache is completely up-to-date, "cachetags" is a dict of the
393 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
393 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
394 None and "shouldwrite" is False.
394 None and "shouldwrite" is False.
395
395
396 If the cache is not up to date, "cachetags" is None. "heads" is a list
396 If the cache is not up to date, "cachetags" is None. "heads" is a list
397 of all heads currently in the repository, ordered from tip to oldest.
397 of all heads currently in the repository, ordered from tip to oldest.
398 "validinfo" is a tuple describing cache validation info. This is used
398 "validinfo" is a tuple describing cache validation info. This is used
399 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
399 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
400 filenode. "shouldwrite" is True.
400 filenode. "shouldwrite" is True.
401
401
402 If the cache is not up to date, the caller is responsible for reading tag
402 If the cache is not up to date, the caller is responsible for reading tag
403 info from each returned head. (See findglobaltags().)
403 info from each returned head. (See findglobaltags().)
404 """
404 """
405 try:
405 try:
406 cachefile = repo.cachevfs(_filename(repo), b'r')
406 cachefile = repo.cachevfs(_filename(repo), b'r')
407 # force reading the file for static-http
407 # force reading the file for static-http
408 cachelines = iter(cachefile)
408 cachelines = iter(cachefile)
409 except IOError:
409 except IOError:
410 cachefile = None
410 cachefile = None
411
411
412 cacherev = None
412 cacherev = None
413 cachenode = None
413 cachenode = None
414 cachehash = None
414 cachehash = None
415 if cachefile:
415 if cachefile:
416 try:
416 try:
417 validline = next(cachelines)
417 validline = next(cachelines)
418 validline = validline.split()
418 validline = validline.split()
419 cacherev = int(validline[0])
419 cacherev = int(validline[0])
420 cachenode = bin(validline[1])
420 cachenode = bin(validline[1])
421 if len(validline) > 2:
421 if len(validline) > 2:
422 cachehash = bin(validline[2])
422 cachehash = bin(validline[2])
423 except Exception:
423 except Exception:
424 # corruption of the cache, just recompute it.
424 # corruption of the cache, just recompute it.
425 pass
425 pass
426
426
427 tipnode = repo.changelog.tip()
427 tipnode = repo.changelog.tip()
428 tiprev = len(repo.changelog) - 1
428 tiprev = len(repo.changelog) - 1
429
429
430 # Case 1 (common): tip is the same, so nothing has changed.
430 # Case 1 (common): tip is the same, so nothing has changed.
431 # (Unchanged tip trivially means no changesets have been added.
431 # (Unchanged tip trivially means no changesets have been added.
432 # But, thanks to localrepository.destroyed(), it also means none
432 # But, thanks to localrepository.destroyed(), it also means none
433 # have been destroyed by strip or rollback.)
433 # have been destroyed by strip or rollback.)
434 if (
434 if (
435 cacherev == tiprev
435 cacherev == tiprev
436 and cachenode == tipnode
436 and cachenode == tipnode
437 and cachehash == scmutil.filteredhash(repo, tiprev)
437 and cachehash == scmutil.filteredhash(repo, tiprev)
438 ):
438 ):
439 tags = _readtags(ui, repo, cachelines, cachefile.name)
439 tags = _readtags(ui, repo, cachelines, cachefile.name)
440 cachefile.close()
440 cachefile.close()
441 return (None, None, None, tags, False)
441 return (None, None, None, tags, False)
442 if cachefile:
442 if cachefile:
443 cachefile.close() # ignore rest of file
443 cachefile.close() # ignore rest of file
444
444
445 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
445 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
446
446
447 repoheads = repo.heads()
447 repoheads = repo.heads()
448 # Case 2 (uncommon): empty repo; get out quickly and don't bother
448 # Case 2 (uncommon): empty repo; get out quickly and don't bother
449 # writing an empty cache.
449 # writing an empty cache.
450 if repoheads == [repo.nullid]:
450 if repoheads == [repo.nullid]:
451 return ([], {}, valid, {}, False)
451 return ([], {}, valid, {}, False)
452
452
453 # Case 3 (uncommon): cache file missing or empty.
453 # Case 3 (uncommon): cache file missing or empty.
454
454
455 # Case 4 (uncommon): tip rev decreased. This should only happen
455 # Case 4 (uncommon): tip rev decreased. This should only happen
456 # when we're called from localrepository.destroyed(). Refresh the
456 # when we're called from localrepository.destroyed(). Refresh the
457 # cache so future invocations will not see disappeared heads in the
457 # cache so future invocations will not see disappeared heads in the
458 # cache.
458 # cache.
459
459
460 # Case 5 (common): tip has changed, so we've added/replaced heads.
460 # Case 5 (common): tip has changed, so we've added/replaced heads.
461
461
462 # As it happens, the code to handle cases 3, 4, 5 is the same.
462 # As it happens, the code to handle cases 3, 4, 5 is the same.
463
463
464 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
464 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
465 # exposed".
465 # exposed".
466 if not len(repo.file(b'.hgtags')):
466 if not len(repo.file(b'.hgtags')):
467 # No tags have ever been committed, so we can avoid a
467 # No tags have ever been committed, so we can avoid a
468 # potentially expensive search.
468 # potentially expensive search.
469 return ([], {}, valid, None, True)
469 return ([], {}, valid, None, True)
470
470
471 # Now we have to look up the .hgtags filenode for every new head.
471 # Now we have to look up the .hgtags filenode for every new head.
472 # This is the most expensive part of finding tags, so performance
472 # This is the most expensive part of finding tags, so performance
473 # depends primarily on the size of newheads. Worst case: no cache
473 # depends primarily on the size of newheads. Worst case: no cache
474 # file, so newheads == repoheads.
474 # file, so newheads == repoheads.
475 # Reversed order helps the cache ('repoheads' is in descending order)
475 # Reversed order helps the cache ('repoheads' is in descending order)
476 cachefnode = _getfnodes(ui, repo, reversed(repoheads))
476 cachefnode = _getfnodes(ui, repo, reversed(repoheads))
477
477
478 # Caller has to iterate over all heads, but can use the filenodes in
478 # Caller has to iterate over all heads, but can use the filenodes in
479 # cachefnode to get to each .hgtags revision quickly.
479 # cachefnode to get to each .hgtags revision quickly.
480 return (repoheads, cachefnode, valid, None, True)
480 return (repoheads, cachefnode, valid, None, True)
481
481
482
482
483 def _getfnodes(ui, repo, nodes):
483 def _getfnodes(ui, repo, nodes):
484 """return .hgtags fnodes for a list of changeset nodes
484 """return .hgtags fnodes for a list of changeset nodes
485
485
486 Return value is a {node: fnode} mapping. There will be no entry for nodes
486 Return value is a {node: fnode} mapping. There will be no entry for nodes
487 without a '.hgtags' file.
487 without a '.hgtags' file.
488 """
488 """
489 starttime = util.timer()
489 starttime = util.timer()
490 fnodescache = hgtagsfnodescache(repo.unfiltered())
490 fnodescache = hgtagsfnodescache(repo.unfiltered())
491 cachefnode = {}
491 cachefnode = {}
492 validated_fnodes = set()
492 validated_fnodes = set()
493 unknown_entries = set()
493 unknown_entries = set()
494
494
495 flog = None
495 flog = None
496 for node in nodes:
496 for node in nodes:
497 fnode = fnodescache.getfnode(node)
497 fnode = fnodescache.getfnode(node)
498 if fnode != repo.nullid:
498 if fnode != repo.nullid:
499 if fnode not in validated_fnodes:
499 if fnode not in validated_fnodes:
500 if flog is None:
500 if flog is None:
501 flog = repo.file(b'.hgtags')
501 flog = repo.file(b'.hgtags')
502 if flog.hasnode(fnode):
502 if flog.hasnode(fnode):
503 validated_fnodes.add(fnode)
503 validated_fnodes.add(fnode)
504 else:
504 else:
505 unknown_entries.add(node)
505 unknown_entries.add(node)
506 cachefnode[node] = fnode
506 cachefnode[node] = fnode
507
507
508 if unknown_entries:
508 if unknown_entries:
509 fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
509 fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
510 for node, fnode in fixed_nodemap.items():
510 for node, fnode in fixed_nodemap.items():
511 if fnode != repo.nullid:
511 if fnode != repo.nullid:
512 cachefnode[node] = fnode
512 cachefnode[node] = fnode
513
513
514 fnodescache.write()
514 fnodescache.write()
515
515
516 duration = util.timer() - starttime
516 duration = util.timer() - starttime
517 ui.log(
517 ui.log(
518 b'tagscache',
518 b'tagscache',
519 b'%d/%d cache hits/lookups in %0.4f seconds\n',
519 b'%d/%d cache hits/lookups in %0.4f seconds\n',
520 fnodescache.hitcount,
520 fnodescache.hitcount,
521 fnodescache.lookupcount,
521 fnodescache.lookupcount,
522 duration,
522 duration,
523 )
523 )
524 return cachefnode
524 return cachefnode
525
525
526
526
def _writetagcache(ui, repo, valid, cachetags):
    filename = _filename(repo)
    try:
        cachefile = repo.cachevfs(filename, b'w', atomictemp=True)
    except (OSError, IOError):
        return

    ui.log(
        b'tagscache',
        b'writing .hg/cache/%s with %d tags\n',
        filename,
        len(cachetags),
    )

    if valid[2]:
        cachefile.write(
            b'%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2]))
        )
    else:
        cachefile.write(b'%d %s\n' % (valid[0], hex(valid[1])))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module. If we converted
    # them to local encoding on input, we would lose info writing them to
    # the cache.
    for (name, (node, hist)) in sorted(cachetags.items()):
        for n in hist:
            cachefile.write(b"%s %s\n" % (hex(n), name))
        cachefile.write(b"%s %s\n" % (hex(node), name))

    try:
        cachefile.close()
    except (OSError, IOError):
        pass

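Taken together, the writes above give the cache file a simple line-oriented layout: one validity header, `<tiprev> <tipnode-hex>` optionally followed by a filtered-state hash, then one `<hexnode> <tagname>` line per history entry and per current tag node. A minimal reader mirroring the writer (the function name is illustrative; the module's own reader is `_readtagcache`, whose tail appears at the top of this excerpt):

    def parse_tag_cache(data):
        # Illustrative inverse of _writetagcache, not part of tags.py.
        lines = data.splitlines()
        validity = lines[0].split()  # [tiprev, tipnode-hex, (filteredhash-hex)]
        entries = []
        for line in lines[1:]:
            # hex nodes contain no spaces, so split once; tag names may.
            hexnode, name = line.split(b' ', 1)
            entries.append((hexnode, name))  # history lines precede the tip line
        return validity, entries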
def tag(repo, names, node, message, local, user, date, editor=False):
    """tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing"""

    if not local:
        m = matchmod.exact([b'.hgtags'])
        st = repo.status(match=m, unknown=True, ignored=True)
        if any(
            (
                st.modified,
                st.added,
                st.removed,
                st.deleted,
                st.unknown,
                st.ignored,
            )
        ):
            raise error.Abort(
                _(b'working copy of .hgtags is changed'),
                hint=_(b'please commit .hgtags manually'),
            )

    with repo.wlock():
        repo.tags()  # instantiate the cache
        _tag(repo, names, node, message, local, user, date, editor=editor)

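As a usage sketch, creating a regular (committed) tag through this API could look like the following; the repository object, target node, message, and user string are assumptions of the example, not values from this module:

    from mercurial import tags as tagsmod

    tagsmod.tag(
        repo,
        b'v1.0',                # a single name may be passed as plain bytes
        node,                   # changeset being tagged
        b'Added tag v1.0',      # commit message (illustrative)
        False,                  # local=False: append to .hgtags and commit
        b'Example User <user@example.com>',
        None,                   # date=None: let the commit use the current time
    )

With the `local` argument set to True, the same call would append to `.hg/localtags` instead and commit nothing.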
def _tag(
    repo, names, node, message, local, user, date, extra=None, editor=False
):
    if isinstance(names, bytes):
        names = (names,)

    branches = repo.branchmap()
    for name in names:
        repo.hook(b'pretag', throw=True, node=hex(node), tag=name, local=local)
        if name in branches:
            repo.ui.warn(
                _(b"warning: tag %s conflicts with existing branch name\n")
                % name
            )

    def writetags(fp, names, munge, prevtags):
        fp.seek(0, io.SEEK_END)
        if prevtags and not prevtags.endswith(b'\n'):
            fp.write(b'\n')
        for name in names:
            if munge:
                m = munge(name)
            else:
                m = name

            if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
                old = repo.tags().get(name, repo.nullid)
                fp.write(b'%s %s\n' % (hex(old), m))
            fp.write(b'%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = b''
    if local:
        try:
            fp = repo.vfs(b'localtags', b'r+')
        except IOError:
            fp = repo.vfs(b'localtags', b'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            repo.hook(b'tag', node=hex(node), tag=name, local=local)
        return

    try:
        fp = repo.wvfs(b'.hgtags', b'rb+')
    except FileNotFoundError:
        fp = repo.wvfs(b'.hgtags', b'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    fp.close()

    repo.invalidatecaches()

    with repo.dirstate.changing_files(repo):
        if b'.hgtags' not in repo.dirstate:
            repo[None].add([b'.hgtags'])

    m = matchmod.exact([b'.hgtags'])
    tagnode = repo.commit(
        message, user, date, extra=extra, match=m, editor=editor
    )

    for name in names:
        repo.hook(b'tag', node=hex(node), tag=name, local=local)

    return tagnode


_fnodescachefile = b'hgtagsfnodes1'
_fnodesrecsize = 4 + 20  # changeset fragment + filenode
_fnodesmissingrec = b'\xff' * 24

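These three constants fix the on-disk record layout used by the class below: one 24-byte record per changelog revision, holding a 4-byte changeset-node prefix followed by the 20-byte .hgtags filenode, with an all-0xff record marking a missing entry. A small decoding sketch under those definitions (the helper is illustrative, not part of tags.py):

    def decode_fnodes_record(raw, rev):
        # Slice the flat cache buffer into the record for ``rev``.
        offset = rev * _fnodesrecsize
        record = bytes(raw[offset : offset + _fnodesrecsize])
        if record == _fnodesmissingrec:
            return None  # nothing cached for this revision yet
        prefix, fnode = record[:4], record[4:]
        return prefix, fnode  # prefix must match node[0:4] to be valid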
class hgtagsfnodescache:
    """Persistent cache mapping revisions to .hgtags filenodes.

    The cache is an array of records. Each item in the array corresponds to
    a changelog revision. Values in the array contain the first 4 bytes of
    the node hash and the 20-byte .hgtags filenode for that revision.

    The first 4 bytes are present as a form of verification. Repository
    stripping and rewriting may change the node at a numeric revision in the
    changelog. The changeset fragment serves as a verifier to detect
    rewriting. This logic is shared with the rev branch cache (see
    branchmap.py).

    The instance holds in memory the full cache content but entries are
    only parsed on read.

    Instances behave like lists. ``c[i]`` works where i is a rev or
    changeset node. Missing indexes are populated automatically on access.
    """

    def __init__(self, repo):
        assert repo.filtername is None

        self._repo = repo

        # Only for reporting purposes.
        self.lookupcount = 0
        self.hitcount = 0

        try:
            data = repo.cachevfs.read(_fnodescachefile)
        except (OSError, IOError):
            data = b""
        self._raw = bytearray(data)

        # The end state of self._raw is an array that is of the exact length
        # required to hold a record for every revision in the repository.
        # We truncate or extend the array as necessary. self._dirtyoffset is
        # defined to be the start offset at which we need to write the output
        # file. This offset is also adjusted when new entries are calculated
        # for array members.
        cllen = len(repo.changelog)
        wantedlen = cllen * _fnodesrecsize
        rawlen = len(self._raw)

        self._dirtyoffset = None

        rawlentokeep = min(
            wantedlen, (rawlen // _fnodesrecsize) * _fnodesrecsize
        )
        if rawlen > rawlentokeep:
            # There's no easy way to truncate array instances. This seems
            # slightly less evil than copying a potentially large array slice.
            for i in range(rawlen - rawlentokeep):
                self._raw.pop()
            rawlen = len(self._raw)
            self._dirtyoffset = rawlen
        if rawlen < wantedlen:
            if self._dirtyoffset is None:
                self._dirtyoffset = rawlen
            # TODO: zero fill entire record, because it's invalid not missing?
            self._raw.extend(b'\xff' * (wantedlen - rawlen))

    def getfnode(self, node, computemissing=True):
        """Obtain the filenode of the .hgtags file at a specified revision.

        If the value is in the cache, the entry will be validated and returned.
        Otherwise, the filenode will be computed and returned unless
        "computemissing" is False. In that case, None will be returned if
        the entry is missing or False if the entry is invalid without
        any potentially expensive computation being performed.

        If an .hgtags does not exist at the specified revision, nullid is
        returned.
        """
        if node == self._repo.nullid:
            return node

        rev = self._repo.changelog.rev(node)

        self.lookupcount += 1

        offset = rev * _fnodesrecsize
        record = b'%s' % self._raw[offset : offset + _fnodesrecsize]
        properprefix = node[0:4]

        # Validate and return existing entry.
        if record != _fnodesmissingrec and len(record) == _fnodesrecsize:
            fileprefix = record[0:4]

            if fileprefix == properprefix:
                self.hitcount += 1
                return record[4:]

            # Fall through.

        # If we get here, the entry is either missing or invalid.

        if not computemissing:
            if record != _fnodesmissingrec:
                return False
            return None

        fnode = self._computefnode(node)
        self._writeentry(offset, properprefix, fnode)
        return fnode

    def _computefnode(self, node):
        """Finds the tag filenode for a node which is missing or invalid
        in cache"""
        ctx = self._repo[node]
        rev = ctx.rev()
        fnode = None
        cl = self._repo.changelog
        p1rev, p2rev = cl._uncheckedparentrevs(rev)
        p1node = cl.node(p1rev)
        p1fnode = self.getfnode(p1node, computemissing=False)
        if p2rev != nullrev:
            # There are some non-merge changesets where p1 is null and p2 is
            # set. Processing them as merges is just slower, but still gives
            # a good result.
            p2node = cl.node(p2rev)
            p2fnode = self.getfnode(p2node, computemissing=False)
            if p1fnode != p2fnode:
                # we cannot rely on readfast because we don't know against what
                # parent the readfast delta is computed
                p1fnode = None
        if p1fnode:
            mctx = ctx.manifestctx()
            fnode = mctx.readfast().get(b'.hgtags')
            if fnode is None:
                fnode = p1fnode
        if fnode is None:
            # Populate missing entry.
            try:
                fnode = ctx.filenode(b'.hgtags')
            except error.LookupError:
                # No .hgtags file on this revision.
                fnode = self._repo.nullid
        return fnode

    def setfnode(self, node, fnode):
        """Set the .hgtags filenode for a given changeset."""
        assert len(fnode) == 20
        ctx = self._repo[node]

        # Do a lookup first to avoid writing if nothing has changed.
        if self.getfnode(ctx.node(), computemissing=False) == fnode:
            return

        self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

    def refresh_invalid_nodes(self, nodes):
        """Recompute the filenodes for a given set of nodes whose cache
        entries are unknown or invalid.

        Also updates the in-memory cache with the correct filenode.
        Caller needs to take care about calling `.write()` so that updates are
        persisted.

        Returns a map {node: recomputed fnode}.
        """
        fixed_nodemap = {}
        for node in nodes:
            fnode = self._computefnode(node)
            fixed_nodemap[node] = fnode
            self.setfnode(node, fnode)
        return fixed_nodemap

    def _writeentry(self, offset, prefix, fnode):
        # Slices on array instances only accept other array.
        entry = bytearray(prefix + fnode)
        self._raw[offset : offset + _fnodesrecsize] = entry
        # self._dirtyoffset could be None.
        self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0)

    def write(self):
        """Perform all necessary writes to cache file.

        This may no-op if no writes are needed or if a write lock could
        not be obtained.
        """
        if self._dirtyoffset is None:
            return

        data = self._raw[self._dirtyoffset :]
        if not data:
            return

        repo = self._repo

        try:
            lock = repo.lock(wait=False)
        except error.LockError:
            repo.ui.log(
                b'tagscache',
                b'not writing .hg/cache/%s because '
                b'lock cannot be acquired\n' % _fnodescachefile,
            )
            return

        try:
            f = repo.cachevfs.open(_fnodescachefile, b'ab')
            try:
                # if the file has been truncated
                actualoffset = f.tell()
                if actualoffset < self._dirtyoffset:
                    self._dirtyoffset = actualoffset
                    data = self._raw[self._dirtyoffset :]
                f.seek(self._dirtyoffset)
                f.truncate()
                repo.ui.log(
                    b'tagscache',
                    b'writing %d bytes to cache/%s\n'
                    % (len(data), _fnodescachefile),
                )
                f.write(data)
                self._dirtyoffset = None
            finally:
                f.close()
        except (IOError, OSError) as inst:
            repo.ui.log(
                b'tagscache',
                b"couldn't write cache/%s: %s\n"
                % (_fnodescachefile, stringutil.forcebytestr(inst)),
            )
        finally:
            lock.release()
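A minimal end-to-end sketch of the class, along the same lines as `_getfnodes` above (an unfiltered `repo` is assumed to be in hand, since the constructor asserts `repo.filtername is None`):

    cache = hgtagsfnodescache(repo.unfiltered())
    tipnode = repo.changelog.tip()
    fnode = cache.getfnode(tipnode)   # computed and cached on a miss
    if fnode != repo.nullid:          # nullid: no .hgtags at that revision
        data = repo.file(b'.hgtags').read(fnode)
    cache.write()                     # persist new entries; silently no-ops
                                      # if the repo lock is unavailable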


def clear_cache_on_disk(repo):
    """function used by the perf extension to clear the "tags" cache on disk"""
    repo.cachevfs.tryunlink(_filename(repo))


def clear_cache_fnodes(repo):
    """function used by the perf extension to clear the "file node" cache"""
    repo.cachevfs.tryunlink(_fnodescachefile)
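These two helpers exist so that the perf::tags benchmark referenced in the changeset description can invalidate each cache independently before a timed run. A hedged sketch of that pattern (the benchmark wiring itself lives in contrib/perf.py and is assumed here):

    # Cold run: drop both on-disk caches, then time a full recomputation.
    clear_cache_on_disk(repo)     # removes the tags cache file
    clear_cache_fnodes(repo)      # removes .hg/cache/hgtagsfnodes1
    repo.tags()                   # the timed operation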
@@ -1,441 +1,441 @@
#require test-repo

Set vars:

  $ . "$TESTDIR/helpers-testrepo.sh"
  $ CONTRIBDIR="$TESTDIR/../contrib"

Prepare repo:

  $ hg init

  $ echo this is file a > a
  $ hg add a
  $ hg commit -m first

  $ echo adding to file a >> a
  $ hg commit -m second

  $ echo adding more to file a >> a
  $ hg commit -m third

  $ hg up -r 0
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo merge-this >> a
  $ hg commit -m merge-able
  created new head

  $ hg up -r 2
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

perfstatus

  $ cat >> $HGRCPATH << EOF
  > [extensions]
  > perf=$CONTRIBDIR/perf.py
  > [perf]
  > presleep=0
  > stub=on
  > parentscount=1
  > EOF
  $ hg help -e perf
  perf extension - helper extension to measure performance

  Configurations
  ==============

  "perf"
  ------

  "all-timing"
    When set, additional statistics will be reported for each benchmark: best,
    worst, median average. If not set only the best timing is reported
    (default: off).

  "presleep"
    number of second to wait before any group of runs (default: 1)

  "pre-run"
    number of run to perform before starting measurement.

  "profile-benchmark"
    Enable profiling for the benchmarked section. (The first iteration is
    benchmarked)

  "run-limits"
    Control the number of runs each benchmark will perform. The option value
    should be a list of '<time>-<numberofrun>' pairs. After each run the
    conditions are considered in order with the following logic:

    If benchmark has been running for <time> seconds, and we have performed
    <numberofrun> iterations, stop the benchmark,

    The default value is: '3.0-100, 10.0-3'

  "stub"
    When set, benchmarks will only be run once, useful for testing (default:
    off)

  list of commands:

   perf::addremove
                 (no help text available)
   perf::ancestors
                 (no help text available)
   perf::ancestorset
                 (no help text available)
   perf::annotate
                 (no help text available)
   perf::bdiff   benchmark a bdiff between revisions
   perf::bookmarks
                 benchmark parsing bookmarks from disk to memory
   perf::branchmap
                 benchmark the update of a branchmap
   perf::branchmapload
                 benchmark reading the branchmap
   perf::branchmapupdate
                 benchmark branchmap update from for <base> revs to <target>
                 revs
   perf::bundle  benchmark the creation of a bundle from a repository
   perf::bundleread
                 Benchmark reading of bundle files.
   perf::cca     (no help text available)
   perf::changegroupchangelog
                 Benchmark producing a changelog group for a changegroup.
   perf::changeset
                 (no help text available)
   perf::ctxfiles
                 (no help text available)
   perf::delta-find
                 benchmark the process of finding a valid delta for a revlog
                 revision
   perf::diffwd  Profile diff of working directory changes
   perf::dirfoldmap
                 benchmap a 'dirstate._map.dirfoldmap.get()' request
   perf::dirs    (no help text available)
   perf::dirstate
                 benchmap the time of various distate operations
   perf::dirstatedirs
                 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
   perf::dirstatefoldmap
                 benchmap a 'dirstate._map.filefoldmap.get()' request
   perf::dirstatewrite
                 benchmap the time it take to write a dirstate on disk
   perf::discovery
                 benchmark discovery between local repo and the peer at given
                 path
   perf::fncacheencode
                 (no help text available)
   perf::fncacheload
                 (no help text available)
   perf::fncachewrite
                 (no help text available)
   perf::heads   benchmark the computation of a changelog heads
   perf::helper-mergecopies
                 find statistics about potential parameters for
                 'perfmergecopies'
   perf::helper-pathcopies
                 find statistic about potential parameters for the
                 'perftracecopies'
   perf::ignore  benchmark operation related to computing ignore
   perf::index   benchmark index creation time followed by a lookup
   perf::linelogedits
                 (no help text available)
   perf::loadmarkers
                 benchmark the time to parse the on-disk markers for a repo
   perf::log     (no help text available)
   perf::lookup  (no help text available)
   perf::lrucachedict
                 (no help text available)
   perf::manifest
                 benchmark the time to read a manifest from disk and return a
                 usable
   perf::mergecalculate
                 (no help text available)
   perf::mergecopies
                 measure runtime of 'copies.mergecopies'
   perf::moonwalk
                 benchmark walking the changelog backwards
   perf::nodelookup
                 (no help text available)
   perf::nodemap
                 benchmark the time necessary to look up revision from a cold
                 nodemap
   perf::parents
                 benchmark the time necessary to fetch one changeset's parents.
   perf::pathcopies
                 benchmark the copy tracing logic
   perf::phases  benchmark phasesets computation
   perf::phasesremote
                 benchmark time needed to analyse phases of the remote server
   perf::progress
                 printing of progress bars
   perf::rawfiles
                 (no help text available)
   perf::revlogchunks
                 Benchmark operations on revlog chunks.
   perf::revlogindex
                 Benchmark operations against a revlog index.
   perf::revlogrevision
                 Benchmark obtaining a revlog revision.
   perf::revlogrevisions
                 Benchmark reading a series of revisions from a revlog.
   perf::revlogwrite
                 Benchmark writing a series of revisions to a revlog.
   perf::revrange
                 (no help text available)
   perf::revset  benchmark the execution time of a revset
   perf::startup
                 (no help text available)
   perf::status  benchmark the performance of a single status call
   perf::stream-consume
                 benchmark the full application of a stream clone
   perf::stream-generate
                 benchmark the full generation of a stream clone
   perf::stream-locked-section
                 benchmark the initial, repo-locked, section of a stream-clone
   perf::tags    Benchmark tags retrieval in various situation
   perf::templating
                 test the rendering time of a given template
   perf::unbundle
                 benchmark application of a bundle in a repository.
   perf::unidiff
                 benchmark a unified diff between revisions
   perf::volatilesets
                 benchmark the computation of various volatile set
   perf::walk    (no help text available)
   perf::write   microbenchmark ui.write (and others)

  (use 'hg help -v perf' to show built-in aliases and global options)

  $ hg help perfaddremove
  hg perf::addremove

  aliases: perfaddremove

  (no help text available)

  options:

   -T --template TEMPLATE display with template

  (some details hidden, use --verbose to show complete help)

  $ hg perfaddremove
  $ hg perfancestors
  $ hg perfancestorset 2
  $ hg perfannotate a
  $ hg perfbdiff -c 1
  $ hg perfbdiff --alldata 1
  $ hg perfunidiff -c 1
  $ hg perfunidiff --alldata 1
  $ hg perfbookmarks
  $ hg perfbranchmap
  $ hg perfbranchmapload
  $ hg perfbranchmapupdate --base "not tip" --target "tip"
  benchmark of branchmap with 3 revisions with 1 new ones
  $ hg perfcca
  $ hg perfchangegroupchangelog
  $ hg perfchangegroupchangelog --cgversion 01
  $ hg perfchangeset 2
  $ hg perfctxfiles 2
  $ hg perfdiffwd
  $ hg perfdirfoldmap
  $ hg perfdirs
  $ hg perfdirstate
  $ hg perfdirstate --contains
  $ hg perfdirstate --iteration
  $ hg perfdirstatedirs
  $ hg perfdirstatefoldmap
  $ hg perfdirstatewrite
#if repofncache
  $ hg perffncacheencode
  $ hg perffncacheload
  $ hg debugrebuildfncache
  fncache already up to date
  $ hg perffncachewrite
  $ hg debugrebuildfncache
  fncache already up to date
#endif
  $ hg perfheads
  $ hg perfignore
  $ hg perfindex
  $ hg perflinelogedits -n 1
  $ hg perfloadmarkers
  $ hg perflog
  $ hg perflookup 2
  $ hg perflrucache
  $ hg perfmanifest 2
  $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
  $ hg perfmanifest -m 44fe2c8352bb
  abort: manifest revision must be integer or full node
  [255]
  $ hg perfmergecalculate -r 3
  $ hg perfmoonwalk
  $ hg perfnodelookup 2
  $ hg perfpathcopies 1 2
  $ hg perfprogress --total 1000
  $ hg perfrawfiles 2
  $ hg perfrevlogindex -c
#if reporevlogstore
  $ hg perfrevlogrevisions .hg/store/data/a.i
#endif
  $ hg perfrevlogrevision -m 0
  $ hg perfrevlogchunks -c
  $ hg perfrevrange
  $ hg perfrevset 'all()'
  $ hg perfstartup
  $ hg perfstatus
  $ hg perfstatus --dirstate
  $ hg perftags
  $ hg perftemplating
  $ hg perfvolatilesets
  $ hg perfwalk
  $ hg perfparents
  $ hg perfdiscovery -q .

Test run control
----------------

Simple single entry

  $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
  ! wall * comb * user * sys * (best of 15) (glob)

Multiple entries

  $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
  ! wall * comb * user * sys * (best of 50) (glob)

error case are ignored

  $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
  malformatted run limit entry, missing "-": 500
  ! wall * comb * user * sys * (best of 50) (glob)
  $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
  malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
  ! wall * comb * user * sys * (best of 50) (glob)
  $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
  malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
  ! wall * comb * user * sys * (best of 50) (glob)

test actual output
------------------

normal output:

  $ hg perfheads --config perf.stub=no
  ! wall * comb * user * sys * (best of *) (glob)

detailed output:

  $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
  ! wall * comb * user * sys * (best of *) (glob)
  ! wall * comb * user * sys * (max of *) (glob)
  ! wall * comb * user * sys * (avg of *) (glob)
  ! wall * comb * user * sys * (median of *) (glob)

test json output
----------------

normal output:

  $ hg perfheads --template json --config perf.stub=no
  [
   {
    "comb": *, (glob)
    "count": *, (glob)
    "sys": *, (glob)
    "user": *, (glob)
    "wall": * (glob)
   }
  ]

detailed output:

  $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
  [
   {
    "avg.comb": *, (glob)
    "avg.count": *, (glob)
    "avg.sys": *, (glob)
    "avg.user": *, (glob)
    "avg.wall": *, (glob)
    "comb": *, (glob)
    "count": *, (glob)
    "max.comb": *, (glob)
    "max.count": *, (glob)
    "max.sys": *, (glob)
    "max.user": *, (glob)
    "max.wall": *, (glob)
    "median.comb": *, (glob)
    "median.count": *, (glob)
    "median.sys": *, (glob)
    "median.user": *, (glob)
    "median.wall": *, (glob)
    "sys": *, (glob)
    "user": *, (glob)
    "wall": * (glob)
   }
  ]

Test pre-run feature
--------------------

(perf discovery has some spurious output)

  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
  ! wall * comb * user * sys * (best of 1) (glob)
  searching for changes
  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
  ! wall * comb * user * sys * (best of 1) (glob)
  searching for changes
  searching for changes
  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
  ! wall * comb * user * sys * (best of 1) (glob)
  searching for changes
  searching for changes
  searching for changes
  searching for changes
  $ hg perf::bundle 'last(all(), 5)'
  $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
  4 changesets found
  $ hg perf::unbundle last-5.hg


test profile-benchmark option
------------------------------

Function to check that statprof ran
  $ statprofran () {
  >   grep -E 'Sample count:|No samples recorded' > /dev/null
  > }
  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran

Check perf.py for historical portability
----------------------------------------

  $ cd "$TESTDIR/.."

  $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
  > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
  > "$TESTDIR"/check-perf-code.py contrib/perf.py
  contrib/perf.py:\d+: (re)
   > from mercurial import (
  import newer module separately in try clause for early Mercurial
  contrib/perf.py:\d+: (re)
   > from mercurial import (
  import newer module separately in try clause for early Mercurial
  contrib/perf.py:\d+: (re)
   > origindexpath = orig.opener.join(indexfile)
  use getvfs()/getsvfs() for early Mercurial
  contrib/perf.py:\d+: (re)
   > origdatapath = orig.opener.join(datafile)
  use getvfs()/getsvfs() for early Mercurial
  contrib/perf.py:\d+: (re)
   > vfs = vfsmod.vfs(tmpdir)
  use getvfs()/getsvfs() for early Mercurial
  contrib/perf.py:\d+: (re)
   > vfs.options = getattr(orig.opener, 'options', None)
  use getvfs()/getsvfs() for early Mercurial
  [1]