# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run, the
  conditions are considered in order with the following logic:

      If the benchmark has been running for <time> seconds and we have
      performed <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

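  For example, with `run-limits = 1.0-50, 10.0-5`, a benchmark stops once it
  has been running for at least 1.0 second and completed 50 runs, or for at
  least 10.0 seconds and completed 5 runs, whichever is satisfied first.
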
``stub``
  When set, benchmarks will only be run once, useful for testing
  (default: off)
'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide a range of Mercurial versions as
#   possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf commands work correctly with as wide a range of
#   Mercurial versions as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf commands for historical features work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf commands for recent features work correctly with early
#   Mercurial

import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time

import mercurial.revlog
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    profiling = None

try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, *args, **kwargs)


def identity(a):
    return a


try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()


def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")


if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator


try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )


def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len


class noop:
    """dummy context manager"""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


NOOPCTX = noop()


def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of the formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm


def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()


@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
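

# Illustrative usage of timeone() (not part of the extension): the context
# manager yields a list that receives a single (wall, user, sys) timing tuple
# once the block exits; `some_expensive_call` is a hypothetical callable.
#
#     with timeone() as res:
#         some_expensive_call()
#     wall, user, system = res[0]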


# list of stop conditions (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)


@contextlib.contextmanager
def noop_context():
    yield


def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)


def formatone(fm, timings, title=None, result=None, displayall=False):
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)


# utilities for historical portability


def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )


def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has the 'name' attribute before a subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking a future removal of the attribute,
    which would silently break the assumptions behind the performance
    measurement.

    This function returns an object that can (1) assign a new value to
    the attribute and (2) restore the attribute's original value.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful to examine
    an attribute which isn't guaranteed to exist in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
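

# Illustrative round-trip with safeattrsetter (not part of the extension;
# `ui` stands for any object with a `fout` attribute, as in gettimer() above):
#
#     fout = safeattrsetter(ui, b'fout')
#     fout.set(ui.ferr)  # temporarily redirect output
#     ...                # run the measurement
#     fout.restore()     # put the original stream back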


# utilities to examine internal API changes


def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )


def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')


def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')


def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # the correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been a filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such a case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")


# utilities to clear cache


def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)


def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')


# perf commands


@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()


@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()


@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data is preserved between calls.

    By default, only the status of tracked files is requested. If
    `--unknown` is passed, the "unknown" files are also included.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                 False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()


@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()


def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None


@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()


def _default_clear_on_disk_tags_cache(repo):
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))


def _default_clear_on_disk_tags_fnodes_cache(repo):
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)


def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    missing_1 = b'\xff' * 4
    missing_2 = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for r in revs:
        cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
    cache.write()


@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE)',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE)',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situations

    The options marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command has run. However, they do not
    destroy any stored data.
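
    An illustrative invocation, using flags from the option table above:

      $ hg perf::tags --clear-revlogs --update-last 100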
949 """
949 """
950 from mercurial import tags
950 from mercurial import tags
951
951
952 opts = _byteskwargs(opts)
952 opts = _byteskwargs(opts)
953 timer, fm = gettimer(ui, opts)
953 timer, fm = gettimer(ui, opts)
954 repocleartagscache = repocleartagscachefunc(repo)
954 repocleartagscache = repocleartagscachefunc(repo)
955 clearrevlogs = opts[b'clear_revlogs']
955 clearrevlogs = opts[b'clear_revlogs']
956 clear_disk = opts[b'clear_on_disk_cache']
956 clear_disk = opts[b'clear_on_disk_cache']
957 clear_fnode = opts[b'clear_fnode_cache_all']
957 clear_fnode = opts[b'clear_fnode_cache_all']
958
958
959 clear_fnode_revs = opts[b'clear_fnode_cache_rev']
959 clear_fnode_revs = opts[b'clear_fnode_cache_rev']
960 update_last_str = opts[b'update_last']
960 update_last_str = opts[b'update_last']
961 update_last = None
961 update_last = None
962 if update_last_str:
962 if update_last_str:
963 try:
963 try:
964 update_last = int(update_last_str)
964 update_last = int(update_last_str)
965 except ValueError:
965 except ValueError:
966 msg = b'could not parse value for update-last: "%s"'
966 msg = b'could not parse value for update-last: "%s"'
967 msg %= update_last_str
967 msg %= update_last_str
968 hint = b'value should be an integer'
968 hint = b'value should be an integer'
969 raise error.Abort(msg, hint=hint)
969 raise error.Abort(msg, hint=hint)
970
970
971 clear_disk_fn = getattr(
971 clear_disk_fn = getattr(
972 tags,
972 tags,
973 "clear_cache_on_disk",
973 "clear_cache_on_disk",
974 _default_clear_on_disk_tags_cache,
974 _default_clear_on_disk_tags_cache,
975 )
975 )
976 if getattr(tags, 'clear_cache_fnodes_is_working', False):
976 if getattr(tags, 'clear_cache_fnodes_is_working', False):
977 clear_fnodes_fn = tags.clear_cache_fnodes
977 clear_fnodes_fn = tags.clear_cache_fnodes
978 else:
978 else:
979 clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
979 clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
980 clear_fnodes_rev_fn = getattr(
980 clear_fnodes_rev_fn = getattr(
981 tags,
981 tags,
982 "forget_fnodes",
982 "forget_fnodes",
983 _default_forget_fnodes,
983 _default_forget_fnodes,
984 )
984 )
985
985
986 clear_revs = []
986 clear_revs = []
987 if clear_fnode_revs:
987 if clear_fnode_revs:
988 clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))
988 clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))
989
989
990 if update_last:
990 if update_last:
991 revset = b'last(all(), %d)' % update_last
991 revset = b'last(all(), %d)' % update_last
992 last_revs = repo.unfiltered().revs(revset)
992 last_revs = repo.unfiltered().revs(revset)
993 clear_revs.extend(last_revs)
993 clear_revs.extend(last_revs)
994
994
995 from mercurial import repoview
995 from mercurial import repoview
996
996
997 rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
997 rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
998 with repo.ui.configoverride(rev_filter, source=b"perf"):
998 with repo.ui.configoverride(rev_filter, source=b"perf"):
999 filter_id = repoview.extrafilter(repo.ui)
999 filter_id = repoview.extrafilter(repo.ui)
1000
1000
1001 filter_name = b'%s%%%s' % (repo.filtername, filter_id)
1001 filter_name = b'%s%%%s' % (repo.filtername, filter_id)
1002 pre_repo = repo.filtered(filter_name)
1002 pre_repo = repo.filtered(filter_name)
1003 pre_repo.tags() # warm the cache
1003 pre_repo.tags() # warm the cache
1004 old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
1004 old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
1005 new_tags_path = repo.cachevfs.join(tags._filename(repo))
1005 new_tags_path = repo.cachevfs.join(tags._filename(repo))
1006
1006
1007 clear_revs = sorted(set(clear_revs))
1007 clear_revs = sorted(set(clear_revs))
1008
1008
1009 def s():
1009 def s():
1010 if update_last:
1010 if update_last:
1011 util.copyfile(old_tags_path, new_tags_path)
1011 util.copyfile(old_tags_path, new_tags_path)
1012 if clearrevlogs:
1012 if clearrevlogs:
1013 clearchangelog(repo)
1013 clearchangelog(repo)
1014 clearfilecache(repo.unfiltered(), 'manifest')
1014 clearfilecache(repo.unfiltered(), 'manifest')
1015 if clear_disk:
1015 if clear_disk:
1016 clear_disk_fn(repo)
1016 clear_disk_fn(repo)
1017 if clear_fnode:
1017 if clear_fnode:
1018 clear_fnodes_fn(repo)
1018 clear_fnodes_fn(repo)
1019 elif clear_revs:
1019 elif clear_revs:
1020 clear_fnodes_rev_fn(repo, clear_revs)
1020 clear_fnodes_rev_fn(repo, clear_revs)
1021 repocleartagscache()
1021 repocleartagscache()
1022
1022
1023 def t():
1023 def t():
1024 len(repo.tags())
1024 len(repo.tags())
1025
1025
1026 timer(t, setup=s)
1026 timer(t, setup=s)
1027 fm.end()
1027 fm.end()
1028
1028
1029
1029
1030 @command(b'perf::ancestors|perfancestors', formatteropts)
1030 @command(b'perf::ancestors|perfancestors', formatteropts)
1031 def perfancestors(ui, repo, **opts):
1031 def perfancestors(ui, repo, **opts):
1032 opts = _byteskwargs(opts)
1032 opts = _byteskwargs(opts)
1033 timer, fm = gettimer(ui, opts)
1033 timer, fm = gettimer(ui, opts)
1034 heads = repo.changelog.headrevs()
1034 heads = repo.changelog.headrevs()
1035
1035
1036 def d():
1036 def d():
1037 for a in repo.changelog.ancestors(heads):
1037 for a in repo.changelog.ancestors(heads):
1038 pass
1038 pass
1039
1039
1040 timer(d)
1040 timer(d)
1041 fm.end()
1041 fm.end()
1042
1042
1043
1043
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()


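# The REVSET positional argument selects the revisions whose membership is
# tested against the lazy ancestor set of all heads. An illustrative run
# (the revset below is only an example):
#
#   $ hg perf::ancestorset '0:100'

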
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()


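# Per the '-c|-m|FILE REV' synopsis above, the target revlog is picked with
# the usual revlog options (assuming revlogopts carries the standard
# -c/changelog and -m/manifest flags). Revision numbers below are made up:
#
#   $ hg perf::delta-find -c 4000          # changelog revision 4000
#   $ hg perf::delta-find -m 4000          # manifest revision 4000
#   $ hg perf::delta-find path/to/file 12  # filelog revision 12

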
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()


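# PATH may be anything the local repository can open as a peer; a fresh peer
# is created in setup() for every run so connection setup is not timed with
# the discovery itself. Sketch (the URL is illustrative):
#
#   $ hg perf::discovery ssh://server/repo

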
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()


@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"no revision specified")
    # make it a consistent set (i.e. without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"added %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()


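# Usage sketch: bundle a range of revisions with an explicit uncompressed
# bundlespec (the revset is illustrative; as the docstring above notes, only
# "none" compression is accepted):
#
#   $ hg perf::bundle -r '1000::2000' --type none-v2

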
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


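# A bundle file to benchmark can be produced with `hg bundle` first, e.g.
# (paths are illustrative):
#
#   $ hg bundle --all --type none-v2 /tmp/repo.hg
#   $ hg perf::bundleread /tmp/repo.hg

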
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (which handles
    clones and pulls).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()


@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()


@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file paths that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()


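# The three modes above are mutually exclusive; illustrative runs:
#
#   $ hg perf::dirstate               # cold load until `in` can be answered
#   $ hg perf::dirstate --iteration   # full iteration over tracked files
#   $ hg perf::dirstate --contains    # many `f in dirstate` checks

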
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")

    def setup():
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds

    def setup():
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    with repo.wlock():
        timer(d, setup=setup)
    fm.end()


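# The four dirstate commands above each drop one dirstate-related cache in
# setup() so the timed call always starts cold (illustrative invocations):
#
#   $ hg perf::dirstatedirs     # rebuild the `_dirs` structure
#   $ hg perf::dirstatefoldmap  # rebuild the file fold map
#   $ hg perf::dirfoldmap       # rebuild the directory fold map
#   $ hg perf::dirstatewrite    # serialize the dirstate under wlock

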
def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)


@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()


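# _getmergerevs above maps --rev/--from/--base onto (wctx, rctx, ancestor).
# Illustrative runs (revisions and revsets are examples only):
#
#   $ hg perf::mergecalculate -r default
#   $ hg perf::mergecalculate -r default --from . --base 'ancestor(., default)'

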
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()


@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()


@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ]
    + formatteropts,
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    tip_rev = repo.changelog.tiprev()

    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.phase(repo, tip_rev)

    timer(d)
    fm.end()


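# With --full the `_phasecache` filecache entry is dropped on every run, so
# re-reading the phase data from disk is timed too; without it only the
# in-memory phase sets are recomputed:
#
#   $ hg perf::phases
#   $ hg perf::phases --full

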
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()


@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()


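# REV|NODE is resolved as a changeset by default; with --manifest-rev it is
# interpreted as a manifest revlog revision number or full node instead
# (values below are illustrative):
#
#   $ hg perf::manifest tip
#   $ hg perf::manifest --manifest-rev 0
#   $ hg perf::manifest --clear-disk tip

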
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()


@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operations related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()


@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not perform revision lookup after creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Examples of useful sets to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()


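# Following the docstring, several --rev flags can be combined to shape the
# lookup pattern; the revsets below come from the suggested examples:
#
#   $ hg perf::index --rev tip
#   $ hg perf::index --rev '-10000:' --rev 0
#   $ hg perf::index --no-lookup   # time index creation only

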
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the number and order of revisions we
    look up can vary. Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookups. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()


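# Unlike perf::index, --rev is mandatory here (the command aborts without
# it); revsets below are illustrative:
#
#   $ hg perf::nodemap --rev tip
#   $ hg perf::nodemap --rev '-10:' --rev ':10'

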
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()


def _find_stream_generator(version):
    """find the proper generator function for this stream version"""
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate(repo):
            # generatev1 takes only the repo and returns (entries, bytes, data)
            entries, bytes, data = generatev1(repo)
            return data

        available[b'v1'] = generate
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)


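# Resolution sketch for the helper above: with all of generatev1/v2/v3
# importable, the mapping holds the keys b'v1', b'v2' and b'v3-exp', so
# b'latest' resolves to b'v2' (the highest key without an '-exp' marker)
# while b'v3-exp' must be requested explicitly.

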
2081 @command(
2081 @command(
2082 b'perf::stream-locked-section',
2082 b'perf::stream-locked-section',
2083 [
2083 [
2084 (
2084 (
2085 b'',
2085 b'',
2086 b'stream-version',
2086 b'stream-version',
2087 b'latest',
2087 b'latest',
2088 b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
2088 b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
2089 ),
2089 ),
2090 ]
2090 ]
2091 + formatteropts,
2091 + formatteropts,
2092 )
2092 )
2093 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
2093 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
2094 """benchmark the initial, repo-locked, section of a stream-clone"""
2094 """benchmark the initial, repo-locked, section of a stream-clone"""
2095
2095
2096 opts = _byteskwargs(opts)
2096 opts = _byteskwargs(opts)
2097 timer, fm = gettimer(ui, opts)
2097 timer, fm = gettimer(ui, opts)
2098
2098
2099 # deletion of the generator may trigger some cleanup that we do not want to
2099 # deletion of the generator may trigger some cleanup that we do not want to
2100 # measure
2100 # measure
2101 result_holder = [None]
2101 result_holder = [None]
2102
2102
2103 def setupone():
2103 def setupone():
2104 result_holder[0] = None
2104 result_holder[0] = None
2105
2105
2106 generate = _find_stream_generator(stream_version)
2106 generate = _find_stream_generator(stream_version)
2107
2107
2108 def runone():
2108 def runone():
2109 # the lock is held for the duration of the initialisation
2109 # the lock is held for the duration of the initialisation
2110 result_holder[0] = generate(repo)
2110 result_holder[0] = generate(repo)
2111
2111
2112 timer(runone, setup=setupone, title=b"load")
2112 timer(runone, setup=setupone, title=b"load")
2113 fm.end()
2113 fm.end()
2114
2114
2115
2115
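# The one-slot `result_holder` list above keeps the produced generator
# alive until the next setup call, so its cleanup cost falls outside the
# timed section. A generic sketch of that pattern with hypothetical names,
# assuming a timer that accepts a `setup` callback as gettimer's does:
def _example_holder_pattern(timer, make_value):
    holder = [None]

    def setupone():
        holder[0] = None  # drop the previous value before timing starts

    def runone():
        holder[0] = make_value()  # only the creation itself is measured

    timer(runone, setup=setupone)
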
2116 @command(
2116 @command(
2117 b'perf::stream-generate',
2117 b'perf::stream-generate',
2118 [
2118 [
2119 (
2119 (
2120 b'',
2120 b'',
2121 b'stream-version',
2121 b'stream-version',
2122 b'latest',
2122 b'latest',
2123 b'stream version to use ("v1", "v2" or "latest" (the default))',
2123 b'stream version to use ("v1", "v2" or "latest" (the default))',
2124 ),
2124 ),
2125 ]
2125 ]
2126 + formatteropts,
2126 + formatteropts,
2127 )
2127 )
2128 def perf_stream_clone_generate(ui, repo, stream_version, **opts):
2128 def perf_stream_clone_generate(ui, repo, stream_version, **opts):
2129 """benchmark the full generation of a stream clone"""
2129 """benchmark the full generation of a stream clone"""
2130
2130
2131 opts = _byteskwargs(opts)
2131 opts = _byteskwargs(opts)
2132 timer, fm = gettimer(ui, opts)
2132 timer, fm = gettimer(ui, opts)
2133
2133
2134 # deletion of the generator may trigger some cleanup that we do not want to
2134 # deletion of the generator may trigger some cleanup that we do not want to
2135 # measure
2135 # measure
2136
2136
2137 generate = _find_stream_generator(stream_version)
2137 generate = _find_stream_generator(stream_version)
2138
2138
2139 def runone():
2139 def runone():
2140 # the lock is held for the duration of the initialisation
2140 # the lock is held for the duration of the initialisation
2141 for chunk in generate(repo):
2141 for chunk in generate(repo):
2142 pass
2142 pass
2143
2143
2144 timer(runone, title=b"generate")
2144 timer(runone, title=b"generate")
2145 fm.end()
2145 fm.end()
2146
2146
2147
2147
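# runone() above drains the chunk iterator with a bare loop, forcing the
# full stream to be generated without buffering it in memory. The same
# idiom on any iterable, as a standalone illustration:
def _example_drain(iterable):
    for _chunk in iterable:
        pass
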
2148 @command(
2148 @command(
2149 b'perf::stream-consume',
2149 b'perf::stream-consume',
2150 formatteropts,
2150 formatteropts,
2151 )
2151 )
2152 def perf_stream_clone_consume(ui, repo, filename, **opts):
2152 def perf_stream_clone_consume(ui, repo, filename, **opts):
2153 """benchmark the full application of a stream clone
2153 """benchmark the full application of a stream clone
2154
2154
2155 This includes the creation of the repository.
2155 This includes the creation of the repository.
2156 """
2156 """
2157 # try except to appease check code
2157 # try except to appease check code
2158 msg = b"mercurial too old, missing necessary module: %s"
2158 msg = b"mercurial too old, missing necessary module: %s"
2159 try:
2159 try:
2160 from mercurial import bundle2
2160 from mercurial import bundle2
2161 except ImportError as exc:
2161 except ImportError as exc:
2162 msg %= _bytestr(exc)
2162 msg %= _bytestr(exc)
2163 raise error.Abort(msg)
2163 raise error.Abort(msg)
2164 try:
2164 try:
2165 from mercurial import exchange
2165 from mercurial import exchange
2166 except ImportError as exc:
2166 except ImportError as exc:
2167 msg %= _bytestr(exc)
2167 msg %= _bytestr(exc)
2168 raise error.Abort(msg)
2168 raise error.Abort(msg)
2169 try:
2169 try:
2170 from mercurial import hg
2170 from mercurial import hg
2171 except ImportError as exc:
2171 except ImportError as exc:
2172 msg %= _bytestr(exc)
2172 msg %= _bytestr(exc)
2173 raise error.Abort(msg)
2173 raise error.Abort(msg)
2174 try:
2174 try:
2175 from mercurial import localrepo
2175 from mercurial import localrepo
2176 except ImportError as exc:
2176 except ImportError as exc:
2177 msg %= _bytestr(exc)
2177 msg %= _bytestr(exc)
2178 raise error.Abort(msg)
2178 raise error.Abort(msg)
2179
2179
2180 opts = _byteskwargs(opts)
2180 opts = _byteskwargs(opts)
2181 timer, fm = gettimer(ui, opts)
2181 timer, fm = gettimer(ui, opts)
2182
2182
2183 # deletion of the generator may trigger some cleanup that we do not want to
2183 # deletion of the generator may trigger some cleanup that we do not want to
2184 # measure
2184 # measure
2185 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2185 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2186 raise error.Abort("not a readable file: %s" % filename)
2186 raise error.Abort("not a readable file: %s" % filename)
2187
2187
2188 run_variables = [None, None]
2188 run_variables = [None, None]
2189
2189
2190 @contextlib.contextmanager
2190 @contextlib.contextmanager
2191 def context():
2191 def context():
2192 with open(filename, mode='rb') as bundle:
2192 with open(filename, mode='rb') as bundle:
2193 with tempfile.TemporaryDirectory() as tmp_dir:
2193 with tempfile.TemporaryDirectory() as tmp_dir:
2194 tmp_dir = fsencode(tmp_dir)
2194 tmp_dir = fsencode(tmp_dir)
2195 run_variables[0] = bundle
2195 run_variables[0] = bundle
2196 run_variables[1] = tmp_dir
2196 run_variables[1] = tmp_dir
2197 yield
2197 yield
2198 run_variables[0] = None
2198 run_variables[0] = None
2199 run_variables[1] = None
2199 run_variables[1] = None
2200
2200
2201 def runone():
2201 def runone():
2202 bundle = run_variables[0]
2202 bundle = run_variables[0]
2203 tmp_dir = run_variables[1]
2203 tmp_dir = run_variables[1]
2204 # only pass ui when no srcrepo
2204 # only pass ui when no srcrepo
2205 localrepo.createrepository(
2205 localrepo.createrepository(
2206 repo.ui, tmp_dir, requirements=repo.requirements
2206 repo.ui, tmp_dir, requirements=repo.requirements
2207 )
2207 )
2208 target = hg.repository(repo.ui, tmp_dir)
2208 target = hg.repository(repo.ui, tmp_dir)
2209 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2209 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2210 # stream v1
2210 # stream v1
2211 if util.safehasattr(gen, 'apply'):
2211 if util.safehasattr(gen, 'apply'):
2212 gen.apply(target)
2212 gen.apply(target)
2213 else:
2213 else:
2214 with target.transaction(b"perf::stream-consume") as tr:
2214 with target.transaction(b"perf::stream-consume") as tr:
2215 bundle2.applybundle(
2215 bundle2.applybundle(
2216 target,
2216 target,
2217 gen,
2217 gen,
2218 tr,
2218 tr,
2219 source=b'unbundle',
2219 source=b'unbundle',
2220 url=filename,
2220 url=filename,
2221 )
2221 )
2222
2222
2223 timer(runone, context=context, title=b"consume")
2223 timer(runone, context=context, title=b"consume")
2224 fm.end()
2224 fm.end()
2225
2225
2226
2226
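# perf::stream-consume threads its per-run state (the open bundle file and
# a fresh temporary directory) through `run_variables`, so creation and
# teardown of that state happen in the context manager rather than in the
# timed call. A self-contained sketch of the pattern, with hypothetical
# names (contextlib and tempfile are the modules already used above):
@contextlib.contextmanager
def _example_run_context(path, run_variables):
    with open(path, mode='rb') as fh:
        with tempfile.TemporaryDirectory() as tmp_dir:
            run_variables[:] = [fh, tmp_dir]
            try:
                yield
            finally:
                run_variables[:] = [None, None]
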
2227 @command(b'perf::parents|perfparents', formatteropts)
2227 @command(b'perf::parents|perfparents', formatteropts)
2228 def perfparents(ui, repo, **opts):
2228 def perfparents(ui, repo, **opts):
2229 """benchmark the time necessary to fetch one changeset's parents.
2229 """benchmark the time necessary to fetch one changeset's parents.
2230
2230
2231 The fetch is done using the `node identifier`, traversing all object layers
2231 The fetch is done using the `node identifier`, traversing all object layers
2232 from the repository object. The first N revisions will be used for this
2232 from the repository object. The first N revisions will be used for this
2233 benchmark. N is controlled by the ``perf.parentscount`` config option
2233 benchmark. N is controlled by the ``perf.parentscount`` config option
2234 (default: 1000).
2234 (default: 1000).
2235 """
2235 """
2236 opts = _byteskwargs(opts)
2236 opts = _byteskwargs(opts)
2237 timer, fm = gettimer(ui, opts)
2237 timer, fm = gettimer(ui, opts)
2238 # control the number of commits perfparents iterates over
2238 # control the number of commits perfparents iterates over
2239 # experimental config: perf.parentscount
2239 # experimental config: perf.parentscount
2240 count = getint(ui, b"perf", b"parentscount", 1000)
2240 count = getint(ui, b"perf", b"parentscount", 1000)
2241 if len(repo.changelog) < count:
2241 if len(repo.changelog) < count:
2242 raise error.Abort(b"repo needs %d commits for this test" % count)
2242 raise error.Abort(b"repo needs %d commits for this test" % count)
2243 repo = repo.unfiltered()
2243 repo = repo.unfiltered()
2244 nl = [repo.changelog.node(i) for i in _xrange(count)]
2244 nl = [repo.changelog.node(i) for i in _xrange(count)]
2245
2245
2246 def d():
2246 def d():
2247 for n in nl:
2247 for n in nl:
2248 repo.changelog.parents(n)
2248 repo.changelog.parents(n)
2249
2249
2250 timer(d)
2250 timer(d)
2251 fm.end()
2251 fm.end()
2252
2252
2253
2253
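# What perfparents measures, reduced to its core: resolve the first N
# changelog nodes, then ask the changelog for each node's parents. A
# hypothetical standalone sketch, not used by the command itself:
def _example_parents_walk(changelog, count):
    nodes = [changelog.node(i) for i in range(count)]
    for n in nodes:
        changelog.parents(n)
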
2254 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2254 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2255 def perfctxfiles(ui, repo, x, **opts):
2255 def perfctxfiles(ui, repo, x, **opts):
2256 opts = _byteskwargs(opts)
2256 opts = _byteskwargs(opts)
2257 x = int(x)
2257 x = int(x)
2258 timer, fm = gettimer(ui, opts)
2258 timer, fm = gettimer(ui, opts)
2259
2259
2260 def d():
2260 def d():
2261 len(repo[x].files())
2261 len(repo[x].files())
2262
2262
2263 timer(d)
2263 timer(d)
2264 fm.end()
2264 fm.end()
2265
2265
2266
2266
2267 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2267 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2268 def perfrawfiles(ui, repo, x, **opts):
2268 def perfrawfiles(ui, repo, x, **opts):
2269 opts = _byteskwargs(opts)
2269 opts = _byteskwargs(opts)
2270 x = int(x)
2270 x = int(x)
2271 timer, fm = gettimer(ui, opts)
2271 timer, fm = gettimer(ui, opts)
2272 cl = repo.changelog
2272 cl = repo.changelog
2273
2273
2274 def d():
2274 def d():
2275 len(cl.read(x)[3])
2275 len(cl.read(x)[3])
2276
2276
2277 timer(d)
2277 timer(d)
2278 fm.end()
2278 fm.end()
2279
2279
2280
2280
2281 @command(b'perf::lookup|perflookup', formatteropts)
2281 @command(b'perf::lookup|perflookup', formatteropts)
2282 def perflookup(ui, repo, rev, **opts):
2282 def perflookup(ui, repo, rev, **opts):
2283 opts = _byteskwargs(opts)
2283 opts = _byteskwargs(opts)
2284 timer, fm = gettimer(ui, opts)
2284 timer, fm = gettimer(ui, opts)
2285 timer(lambda: len(repo.lookup(rev)))
2285 timer(lambda: len(repo.lookup(rev)))
2286 fm.end()
2286 fm.end()
2287
2287
2288
2288
2289 @command(
2289 @command(
2290 b'perf::linelogedits|perflinelogedits',
2290 b'perf::linelogedits|perflinelogedits',
2291 [
2291 [
2292 (b'n', b'edits', 10000, b'number of edits'),
2292 (b'n', b'edits', 10000, b'number of edits'),
2293 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2293 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2294 ],
2294 ],
2295 norepo=True,
2295 norepo=True,
2296 )
2296 )
2297 def perflinelogedits(ui, **opts):
2297 def perflinelogedits(ui, **opts):
2298 from mercurial import linelog
2298 from mercurial import linelog
2299
2299
2300 opts = _byteskwargs(opts)
2300 opts = _byteskwargs(opts)
2301
2301
2302 edits = opts[b'edits']
2302 edits = opts[b'edits']
2303 maxhunklines = opts[b'max_hunk_lines']
2303 maxhunklines = opts[b'max_hunk_lines']
2304
2304
2305 maxb1 = 100000
2305 maxb1 = 100000
2306 random.seed(0)
2306 random.seed(0)
2307 randint = random.randint
2307 randint = random.randint
2308 currentlines = 0
2308 currentlines = 0
2309 arglist = []
2309 arglist = []
2310 for rev in _xrange(edits):
2310 for rev in _xrange(edits):
2311 a1 = randint(0, currentlines)
2311 a1 = randint(0, currentlines)
2312 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2312 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2313 b1 = randint(0, maxb1)
2313 b1 = randint(0, maxb1)
2314 b2 = randint(b1, b1 + maxhunklines)
2314 b2 = randint(b1, b1 + maxhunklines)
2315 currentlines += (b2 - b1) - (a2 - a1)
2315 currentlines += (b2 - b1) - (a2 - a1)
2316 arglist.append((rev, a1, a2, b1, b2))
2316 arglist.append((rev, a1, a2, b1, b2))
2317
2317
2318 def d():
2318 def d():
2319 ll = linelog.linelog()
2319 ll = linelog.linelog()
2320 for args in arglist:
2320 for args in arglist:
2321 ll.replacelines(*args)
2321 ll.replacelines(*args)
2322
2322
2323 timer, fm = gettimer(ui, opts)
2323 timer, fm = gettimer(ui, opts)
2324 timer(d)
2324 timer(d)
2325 fm.end()
2325 fm.end()
2326
2326
2327
2327
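# Each synthetic linelog edit above replaces source lines [a1, a2) with
# target lines [b1, b2), so the tracked line count changes by
# (b2 - b1) - (a2 - a1). One generation step as a standalone sketch
# (hypothetical helper; `rng` is assumed to be a random.Random instance):
def _example_random_edit(rng, currentlines, maxhunklines, maxb1):
    a1 = rng.randint(0, currentlines)
    a2 = rng.randint(a1, min(currentlines, a1 + maxhunklines))
    b1 = rng.randint(0, maxb1)
    b2 = rng.randint(b1, b1 + maxhunklines)
    return (a1, a2, b1, b2), currentlines + (b2 - b1) - (a2 - a1)
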
2328 @command(b'perf::revrange|perfrevrange', formatteropts)
2328 @command(b'perf::revrange|perfrevrange', formatteropts)
2329 def perfrevrange(ui, repo, *specs, **opts):
2329 def perfrevrange(ui, repo, *specs, **opts):
2330 opts = _byteskwargs(opts)
2330 opts = _byteskwargs(opts)
2331 timer, fm = gettimer(ui, opts)
2331 timer, fm = gettimer(ui, opts)
2332 revrange = scmutil.revrange
2332 revrange = scmutil.revrange
2333 timer(lambda: len(revrange(repo, specs)))
2333 timer(lambda: len(revrange(repo, specs)))
2334 fm.end()
2334 fm.end()
2335
2335
2336
2336
2337 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2337 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2338 def perfnodelookup(ui, repo, rev, **opts):
2338 def perfnodelookup(ui, repo, rev, **opts):
2339 opts = _byteskwargs(opts)
2339 opts = _byteskwargs(opts)
2340 timer, fm = gettimer(ui, opts)
2340 timer, fm = gettimer(ui, opts)
2341 import mercurial.revlog
2341 import mercurial.revlog
2342
2342
2343 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2343 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2344 n = scmutil.revsingle(repo, rev).node()
2344 n = scmutil.revsingle(repo, rev).node()
2345
2345
2346 try:
2346 try:
2347 cl = revlog(getsvfs(repo), radix=b"00changelog")
2347 cl = revlog(getsvfs(repo), radix=b"00changelog")
2348 except TypeError:
2348 except TypeError:
2349 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2349 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2350
2350
2351 def d():
2351 def d():
2352 cl.rev(n)
2352 cl.rev(n)
2353 clearcaches(cl)
2353 clearcaches(cl)
2354
2354
2355 timer(d)
2355 timer(d)
2356 fm.end()
2356 fm.end()
2357
2357
2358
2358
2359 @command(
2359 @command(
2360 b'perf::log|perflog',
2360 b'perf::log|perflog',
2361 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2361 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2362 )
2362 )
2363 def perflog(ui, repo, rev=None, **opts):
2363 def perflog(ui, repo, rev=None, **opts):
2364 opts = _byteskwargs(opts)
2364 opts = _byteskwargs(opts)
2365 if rev is None:
2365 if rev is None:
2366 rev = []
2366 rev = []
2367 timer, fm = gettimer(ui, opts)
2367 timer, fm = gettimer(ui, opts)
2368 ui.pushbuffer()
2368 ui.pushbuffer()
2369 timer(
2369 timer(
2370 lambda: commands.log(
2370 lambda: commands.log(
2371 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2371 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2372 )
2372 )
2373 )
2373 )
2374 ui.popbuffer()
2374 ui.popbuffer()
2375 fm.end()
2375 fm.end()
2376
2376
2377
2377
2378 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2378 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2379 def perfmoonwalk(ui, repo, **opts):
2379 def perfmoonwalk(ui, repo, **opts):
2380 """benchmark walking the changelog backwards
2380 """benchmark walking the changelog backwards
2381
2381
2382 This also loads the changelog data for each revision in the changelog.
2382 This also loads the changelog data for each revision in the changelog.
2383 """
2383 """
2384 opts = _byteskwargs(opts)
2384 opts = _byteskwargs(opts)
2385 timer, fm = gettimer(ui, opts)
2385 timer, fm = gettimer(ui, opts)
2386
2386
2387 def moonwalk():
2387 def moonwalk():
2388 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2388 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2389 ctx = repo[i]
2389 ctx = repo[i]
2390 ctx.branch() # read changelog data (in addition to the index)
2390 ctx.branch() # read changelog data (in addition to the index)
2391
2391
2392 timer(moonwalk)
2392 timer(moonwalk)
2393 fm.end()
2393 fm.end()
2394
2394
2395
2395
2396 @command(
2396 @command(
2397 b'perf::templating|perftemplating',
2397 b'perf::templating|perftemplating',
2398 [
2398 [
2399 (b'r', b'rev', [], b'revisions to run the template on'),
2399 (b'r', b'rev', [], b'revisions to run the template on'),
2400 ]
2400 ]
2401 + formatteropts,
2401 + formatteropts,
2402 )
2402 )
2403 def perftemplating(ui, repo, testedtemplate=None, **opts):
2403 def perftemplating(ui, repo, testedtemplate=None, **opts):
2404 """test the rendering time of a given template"""
2404 """test the rendering time of a given template"""
2405 if makelogtemplater is None:
2405 if makelogtemplater is None:
2406 raise error.Abort(
2406 raise error.Abort(
2407 b"perftemplating not available with this Mercurial",
2407 b"perftemplating not available with this Mercurial",
2408 hint=b"use 4.3 or later",
2408 hint=b"use 4.3 or later",
2409 )
2409 )
2410
2410
2411 opts = _byteskwargs(opts)
2411 opts = _byteskwargs(opts)
2412
2412
2413 nullui = ui.copy()
2413 nullui = ui.copy()
2414 nullui.fout = open(os.devnull, 'wb')
2414 nullui.fout = open(os.devnull, 'wb')
2415 nullui.disablepager()
2415 nullui.disablepager()
2416 revs = opts.get(b'rev')
2416 revs = opts.get(b'rev')
2417 if not revs:
2417 if not revs:
2418 revs = [b'all()']
2418 revs = [b'all()']
2419 revs = list(scmutil.revrange(repo, revs))
2419 revs = list(scmutil.revrange(repo, revs))
2420
2420
2421 defaulttemplate = (
2421 defaulttemplate = (
2422 b'{date|shortdate} [{rev}:{node|short}]'
2422 b'{date|shortdate} [{rev}:{node|short}]'
2423 b' {author|person}: {desc|firstline}\n'
2423 b' {author|person}: {desc|firstline}\n'
2424 )
2424 )
2425 if testedtemplate is None:
2425 if testedtemplate is None:
2426 testedtemplate = defaulttemplate
2426 testedtemplate = defaulttemplate
2427 displayer = makelogtemplater(nullui, repo, testedtemplate)
2427 displayer = makelogtemplater(nullui, repo, testedtemplate)
2428
2428
2429 def format():
2429 def format():
2430 for r in revs:
2430 for r in revs:
2431 ctx = repo[r]
2431 ctx = repo[r]
2432 displayer.show(ctx)
2432 displayer.show(ctx)
2433 displayer.flush(ctx)
2433 displayer.flush(ctx)
2434
2434
2435 timer, fm = gettimer(ui, opts)
2435 timer, fm = gettimer(ui, opts)
2436 timer(format)
2436 timer(format)
2437 fm.end()
2437 fm.end()
2438
2438
2439
2439
2440 def _displaystats(ui, opts, entries, data):
2440 def _displaystats(ui, opts, entries, data):
2441 # use a second formatter because the data are quite different, not sure
2441 # use a second formatter because the data are quite different, not sure
2442 # how it flies with the templater.
2442 # how it flies with the templater.
2443 fm = ui.formatter(b'perf-stats', opts)
2443 fm = ui.formatter(b'perf-stats', opts)
2444 for key, title in entries:
2444 for key, title in entries:
2445 values = data[key]
2445 values = data[key]
2446 nbvalues = len(values)
2446 nbvalues = len(values)
2447 values.sort()
2447 values.sort()
2448 stats = {
2448 stats = {
2449 'key': key,
2449 'key': key,
2450 'title': title,
2450 'title': title,
2451 'nbitems': len(values),
2451 'nbitems': len(values),
2452 'min': values[0][0],
2452 'min': values[0][0],
2453 '10%': values[(nbvalues * 10) // 100][0],
2453 '10%': values[(nbvalues * 10) // 100][0],
2454 '25%': values[(nbvalues * 25) // 100][0],
2454 '25%': values[(nbvalues * 25) // 100][0],
2455 '50%': values[(nbvalues * 50) // 100][0],
2455 '50%': values[(nbvalues * 50) // 100][0],
2456 '75%': values[(nbvalues * 75) // 100][0],
2456 '75%': values[(nbvalues * 75) // 100][0],
2457 '80%': values[(nbvalues * 80) // 100][0],
2457 '80%': values[(nbvalues * 80) // 100][0],
2458 '85%': values[(nbvalues * 85) // 100][0],
2458 '85%': values[(nbvalues * 85) // 100][0],
2459 '90%': values[(nbvalues * 90) // 100][0],
2459 '90%': values[(nbvalues * 90) // 100][0],
2460 '95%': values[(nbvalues * 95) // 100][0],
2460 '95%': values[(nbvalues * 95) // 100][0],
2461 '99%': values[(nbvalues * 99) // 100][0],
2461 '99%': values[(nbvalues * 99) // 100][0],
2462 'max': values[-1][0],
2462 'max': values[-1][0],
2463 }
2463 }
2464 fm.startitem()
2464 fm.startitem()
2465 fm.data(**stats)
2465 fm.data(**stats)
2466 # render the statistics in a human-readable form
2466 # render the statistics in a human-readable form
2467 fm.plain('### %s (%d items)\n' % (title, len(values)))
2467 fm.plain('### %s (%d items)\n' % (title, len(values)))
2468 lines = [
2468 lines = [
2469 'min',
2469 'min',
2470 '10%',
2470 '10%',
2471 '25%',
2471 '25%',
2472 '50%',
2472 '50%',
2473 '75%',
2473 '75%',
2474 '80%',
2474 '80%',
2475 '85%',
2475 '85%',
2476 '90%',
2476 '90%',
2477 '95%',
2477 '95%',
2478 '99%',
2478 '99%',
2479 'max',
2479 'max',
2480 ]
2480 ]
2481 for l in lines:
2481 for l in lines:
2482 fm.plain('%s: %s\n' % (l, stats[l]))
2482 fm.plain('%s: %s\n' % (l, stats[l]))
2483 fm.end()
2483 fm.end()
2484
2484
2485
2485
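# Each percentile above is a nearest-rank lookup over the sorted samples:
# index (n * P) // 100 of a list of n entries. Equivalent standalone
# helper (hypothetical, illustration only):
def _example_percentile(sorted_values, pct):
    return sorted_values[(len(sorted_values) * pct) // 100]
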
2486 @command(
2486 @command(
2487 b'perf::helper-mergecopies|perfhelper-mergecopies',
2487 b'perf::helper-mergecopies|perfhelper-mergecopies',
2488 formatteropts
2488 formatteropts
2489 + [
2489 + [
2490 (b'r', b'revs', [], b'restrict search to these revisions'),
2490 (b'r', b'revs', [], b'restrict search to these revisions'),
2491 (b'', b'timing', False, b'provides extra data (costly)'),
2491 (b'', b'timing', False, b'provides extra data (costly)'),
2492 (b'', b'stats', False, b'provides statistic about the measured data'),
2492 (b'', b'stats', False, b'provides statistic about the measured data'),
2493 ],
2493 ],
2494 )
2494 )
2495 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2495 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2496 """find statistics about potential parameters for `perfmergecopies`
2496 """find statistics about potential parameters for `perfmergecopies`
2497
2497
2498 This command finds (base, p1, p2) triplets relevant for copytracing
2498 This command finds (base, p1, p2) triplets relevant for copytracing
2499 benchmarking in the context of a merge. It reports values for some of the
2499 benchmarking in the context of a merge. It reports values for some of the
2500 parameters that impact merge copy tracing time during merge.
2500 parameters that impact merge copy tracing time during merge.
2501
2501
2502 If `--timing` is set, rename detection is run and the associated timing
2502 If `--timing` is set, rename detection is run and the associated timing
2503 will be reported. The extra details come at the cost of slower command
2503 will be reported. The extra details come at the cost of slower command
2504 execution.
2504 execution.
2505
2505
2506 Since rename detection is only run once, other factors might easily
2506 Since rename detection is only run once, other factors might easily
2507 affect the precision of the timing. However it should give a good
2507 affect the precision of the timing. However it should give a good
2508 approximation of which revision triplets are very costly.
2508 approximation of which revision triplets are very costly.
2509 """
2509 """
2510 opts = _byteskwargs(opts)
2510 opts = _byteskwargs(opts)
2511 fm = ui.formatter(b'perf', opts)
2511 fm = ui.formatter(b'perf', opts)
2512 dotiming = opts[b'timing']
2512 dotiming = opts[b'timing']
2513 dostats = opts[b'stats']
2513 dostats = opts[b'stats']
2514
2514
2515 output_template = [
2515 output_template = [
2516 ("base", "%(base)12s"),
2516 ("base", "%(base)12s"),
2517 ("p1", "%(p1.node)12s"),
2517 ("p1", "%(p1.node)12s"),
2518 ("p2", "%(p2.node)12s"),
2518 ("p2", "%(p2.node)12s"),
2519 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2519 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2520 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2520 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2521 ("p1.renames", "%(p1.renamedfiles)12d"),
2521 ("p1.renames", "%(p1.renamedfiles)12d"),
2522 ("p1.time", "%(p1.time)12.3f"),
2522 ("p1.time", "%(p1.time)12.3f"),
2523 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2523 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2524 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2524 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2525 ("p2.renames", "%(p2.renamedfiles)12d"),
2525 ("p2.renames", "%(p2.renamedfiles)12d"),
2526 ("p2.time", "%(p2.time)12.3f"),
2526 ("p2.time", "%(p2.time)12.3f"),
2527 ("renames", "%(nbrenamedfiles)12d"),
2527 ("renames", "%(nbrenamedfiles)12d"),
2528 ("total.time", "%(time)12.3f"),
2528 ("total.time", "%(time)12.3f"),
2529 ]
2529 ]
2530 if not dotiming:
2530 if not dotiming:
2531 output_template = [
2531 output_template = [
2532 i
2532 i
2533 for i in output_template
2533 for i in output_template
2534 if not ('time' in i[0] or 'renames' in i[0])
2534 if not ('time' in i[0] or 'renames' in i[0])
2535 ]
2535 ]
2536 header_names = [h for (h, v) in output_template]
2536 header_names = [h for (h, v) in output_template]
2537 output = ' '.join([v for (h, v) in output_template]) + '\n'
2537 output = ' '.join([v for (h, v) in output_template]) + '\n'
2538 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2538 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2539 fm.plain(header % tuple(header_names))
2539 fm.plain(header % tuple(header_names))
2540
2540
2541 if not revs:
2541 if not revs:
2542 revs = ['all()']
2542 revs = ['all()']
2543 revs = scmutil.revrange(repo, revs)
2543 revs = scmutil.revrange(repo, revs)
2544
2544
2545 if dostats:
2545 if dostats:
2546 alldata = {
2546 alldata = {
2547 'nbrevs': [],
2547 'nbrevs': [],
2548 'nbmissingfiles': [],
2548 'nbmissingfiles': [],
2549 }
2549 }
2550 if dotiming:
2550 if dotiming:
2551 alldata['parentnbrenames'] = []
2551 alldata['parentnbrenames'] = []
2552 alldata['totalnbrenames'] = []
2552 alldata['totalnbrenames'] = []
2553 alldata['parenttime'] = []
2553 alldata['parenttime'] = []
2554 alldata['totaltime'] = []
2554 alldata['totaltime'] = []
2555
2555
2556 roi = repo.revs('merge() and %ld', revs)
2556 roi = repo.revs('merge() and %ld', revs)
2557 for r in roi:
2557 for r in roi:
2558 ctx = repo[r]
2558 ctx = repo[r]
2559 p1 = ctx.p1()
2559 p1 = ctx.p1()
2560 p2 = ctx.p2()
2560 p2 = ctx.p2()
2561 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2561 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2562 for b in bases:
2562 for b in bases:
2563 b = repo[b]
2563 b = repo[b]
2564 p1missing = copies._computeforwardmissing(b, p1)
2564 p1missing = copies._computeforwardmissing(b, p1)
2565 p2missing = copies._computeforwardmissing(b, p2)
2565 p2missing = copies._computeforwardmissing(b, p2)
2566 data = {
2566 data = {
2567 b'base': b.hex(),
2567 b'base': b.hex(),
2568 b'p1.node': p1.hex(),
2568 b'p1.node': p1.hex(),
2569 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2569 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2570 b'p1.nbmissingfiles': len(p1missing),
2570 b'p1.nbmissingfiles': len(p1missing),
2571 b'p2.node': p2.hex(),
2571 b'p2.node': p2.hex(),
2572 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2572 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2573 b'p2.nbmissingfiles': len(p2missing),
2573 b'p2.nbmissingfiles': len(p2missing),
2574 }
2574 }
2575 if dostats:
2575 if dostats:
2576 if p1missing:
2576 if p1missing:
2577 alldata['nbrevs'].append(
2577 alldata['nbrevs'].append(
2578 (data['p1.nbrevs'], b.hex(), p1.hex())
2578 (data['p1.nbrevs'], b.hex(), p1.hex())
2579 )
2579 )
2580 alldata['nbmissingfiles'].append(
2580 alldata['nbmissingfiles'].append(
2581 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2581 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2582 )
2582 )
2583 if p2missing:
2583 if p2missing:
2584 alldata['nbrevs'].append(
2584 alldata['nbrevs'].append(
2585 (data['p2.nbrevs'], b.hex(), p2.hex())
2585 (data['p2.nbrevs'], b.hex(), p2.hex())
2586 )
2586 )
2587 alldata['nbmissingfiles'].append(
2587 alldata['nbmissingfiles'].append(
2588 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2588 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2589 )
2589 )
2590 if dotiming:
2590 if dotiming:
2591 begin = util.timer()
2591 begin = util.timer()
2592 mergedata = copies.mergecopies(repo, p1, p2, b)
2592 mergedata = copies.mergecopies(repo, p1, p2, b)
2593 end = util.timer()
2593 end = util.timer()
2594 # not very stable timing since we did only one run
2594 # not very stable timing since we did only one run
2595 data['time'] = end - begin
2595 data['time'] = end - begin
2596 # mergedata contains five dicts: "copy", "movewithdir",
2596 # mergedata contains five dicts: "copy", "movewithdir",
2597 # "diverge", "renamedelete" and "dirmove".
2597 # "diverge", "renamedelete" and "dirmove".
2598 # The first 4 are about renamed files, so let's count those.
2598 # The first 4 are about renamed files, so let's count those.
2599 renames = len(mergedata[0])
2599 renames = len(mergedata[0])
2600 renames += len(mergedata[1])
2600 renames += len(mergedata[1])
2601 renames += len(mergedata[2])
2601 renames += len(mergedata[2])
2602 renames += len(mergedata[3])
2602 renames += len(mergedata[3])
2603 data['nbrenamedfiles'] = renames
2603 data['nbrenamedfiles'] = renames
2604 begin = util.timer()
2604 begin = util.timer()
2605 p1renames = copies.pathcopies(b, p1)
2605 p1renames = copies.pathcopies(b, p1)
2606 end = util.timer()
2606 end = util.timer()
2607 data['p1.time'] = end - begin
2607 data['p1.time'] = end - begin
2608 begin = util.timer()
2608 begin = util.timer()
2609 p2renames = copies.pathcopies(b, p2)
2609 p2renames = copies.pathcopies(b, p2)
2610 end = util.timer()
2610 end = util.timer()
2611 data['p2.time'] = end - begin
2611 data['p2.time'] = end - begin
2612 data['p1.renamedfiles'] = len(p1renames)
2612 data['p1.renamedfiles'] = len(p1renames)
2613 data['p2.renamedfiles'] = len(p2renames)
2613 data['p2.renamedfiles'] = len(p2renames)
2614
2614
2615 if dostats:
2615 if dostats:
2616 if p1missing:
2616 if p1missing:
2617 alldata['parentnbrenames'].append(
2617 alldata['parentnbrenames'].append(
2618 (data['p1.renamedfiles'], b.hex(), p1.hex())
2618 (data['p1.renamedfiles'], b.hex(), p1.hex())
2619 )
2619 )
2620 alldata['parenttime'].append(
2620 alldata['parenttime'].append(
2621 (data['p1.time'], b.hex(), p1.hex())
2621 (data['p1.time'], b.hex(), p1.hex())
2622 )
2622 )
2623 if p2missing:
2623 if p2missing:
2624 alldata['parentnbrenames'].append(
2624 alldata['parentnbrenames'].append(
2625 (data['p2.renamedfiles'], b.hex(), p2.hex())
2625 (data['p2.renamedfiles'], b.hex(), p2.hex())
2626 )
2626 )
2627 alldata['parenttime'].append(
2627 alldata['parenttime'].append(
2628 (data['p2.time'], b.hex(), p2.hex())
2628 (data['p2.time'], b.hex(), p2.hex())
2629 )
2629 )
2630 if p1missing or p2missing:
2630 if p1missing or p2missing:
2631 alldata['totalnbrenames'].append(
2631 alldata['totalnbrenames'].append(
2632 (
2632 (
2633 data['nbrenamedfiles'],
2633 data['nbrenamedfiles'],
2634 b.hex(),
2634 b.hex(),
2635 p1.hex(),
2635 p1.hex(),
2636 p2.hex(),
2636 p2.hex(),
2637 )
2637 )
2638 )
2638 )
2639 alldata['totaltime'].append(
2639 alldata['totaltime'].append(
2640 (data['time'], b.hex(), p1.hex(), p2.hex())
2640 (data['time'], b.hex(), p1.hex(), p2.hex())
2641 )
2641 )
2642 fm.startitem()
2642 fm.startitem()
2643 fm.data(**data)
2643 fm.data(**data)
2644 # make node pretty for the human output
2644 # make node pretty for the human output
2645 out = data.copy()
2645 out = data.copy()
2646 out['base'] = fm.hexfunc(b.node())
2646 out['base'] = fm.hexfunc(b.node())
2647 out['p1.node'] = fm.hexfunc(p1.node())
2647 out['p1.node'] = fm.hexfunc(p1.node())
2648 out['p2.node'] = fm.hexfunc(p2.node())
2648 out['p2.node'] = fm.hexfunc(p2.node())
2649 fm.plain(output % out)
2649 fm.plain(output % out)
2650
2650
2651 fm.end()
2651 fm.end()
2652 if dostats:
2652 if dostats:
2653 # use a second formatter because the data are quite different, not sure
2653 # use a second formatter because the data are quite different, not sure
2654 # how it flies with the templater.
2654 # how it flies with the templater.
2655 entries = [
2655 entries = [
2656 ('nbrevs', 'number of revisions covered'),
2656 ('nbrevs', 'number of revisions covered'),
2657 ('nbmissingfiles', 'number of missing files at head'),
2657 ('nbmissingfiles', 'number of missing files at head'),
2658 ]
2658 ]
2659 if dotiming:
2659 if dotiming:
2660 entries.append(
2660 entries.append(
2661 ('parentnbrenames', 'renames from one parent to base')
2661 ('parentnbrenames', 'renames from one parent to base')
2662 )
2662 )
2663 entries.append(('totalnbrenames', 'total number of renames'))
2663 entries.append(('totalnbrenames', 'total number of renames'))
2664 entries.append(('parenttime', 'time for one parent'))
2664 entries.append(('parenttime', 'time for one parent'))
2665 entries.append(('totaltime', 'time for both parents'))
2665 entries.append(('totaltime', 'time for both parents'))
2666 _displaystats(ui, opts, entries, alldata)
2666 _displaystats(ui, opts, entries, alldata)
2667
2667
2668
2668
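# The per-call timings above bracket a single invocation between two
# util.timer() reads; as the inline comments note, a single run is cheap
# but noisy. The same idiom as a generic helper, with time.perf_counter
# standing in for util.timer (hypothetical, illustration only):
import time

def _example_time_once(func, *args):
    begin = time.perf_counter()
    result = func(*args)
    return result, time.perf_counter() - begin
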
2669 @command(
2669 @command(
2670 b'perf::helper-pathcopies|perfhelper-pathcopies',
2670 b'perf::helper-pathcopies|perfhelper-pathcopies',
2671 formatteropts
2671 formatteropts
2672 + [
2672 + [
2673 (b'r', b'revs', [], b'restrict search to these revisions'),
2673 (b'r', b'revs', [], b'restrict search to these revisions'),
2674 (b'', b'timing', False, b'provides extra data (costly)'),
2674 (b'', b'timing', False, b'provides extra data (costly)'),
2675 (b'', b'stats', False, b'provides statistic about the measured data'),
2675 (b'', b'stats', False, b'provides statistic about the measured data'),
2676 ],
2676 ],
2677 )
2677 )
2678 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2678 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2679 """find statistic about potential parameters for the `perftracecopies`
2679 """find statistic about potential parameters for the `perftracecopies`
2680
2680
2681 This command finds source-destination pairs relevant for copytracing testing.
2681 This command finds source-destination pairs relevant for copytracing testing.
2682 It reports values for some of the parameters that impact copy tracing time.
2682 It reports values for some of the parameters that impact copy tracing time.
2683
2683
2684 If `--timing` is set, rename detection is run and the associated timing
2684 If `--timing` is set, rename detection is run and the associated timing
2685 will be reported. The extra details come at the cost of a slower command
2685 will be reported. The extra details come at the cost of a slower command
2686 execution.
2686 execution.
2687
2687
2688 Since the rename detection is only run once, other factors might easily
2688 Since the rename detection is only run once, other factors might easily
2689 affect the precision of the timing. However it should give a good
2689 affect the precision of the timing. However it should give a good
2690 approximation of which revision pairs are very costly.
2690 approximation of which revision pairs are very costly.
2691 """
2691 """
2692 opts = _byteskwargs(opts)
2692 opts = _byteskwargs(opts)
2693 fm = ui.formatter(b'perf', opts)
2693 fm = ui.formatter(b'perf', opts)
2694 dotiming = opts[b'timing']
2694 dotiming = opts[b'timing']
2695 dostats = opts[b'stats']
2695 dostats = opts[b'stats']
2696
2696
2697 if dotiming:
2697 if dotiming:
2698 header = '%12s %12s %12s %12s %12s %12s\n'
2698 header = '%12s %12s %12s %12s %12s %12s\n'
2699 output = (
2699 output = (
2700 "%(source)12s %(destination)12s "
2700 "%(source)12s %(destination)12s "
2701 "%(nbrevs)12d %(nbmissingfiles)12d "
2701 "%(nbrevs)12d %(nbmissingfiles)12d "
2702 "%(nbrenamedfiles)12d %(time)18.5f\n"
2702 "%(nbrenamedfiles)12d %(time)18.5f\n"
2703 )
2703 )
2704 header_names = (
2704 header_names = (
2705 "source",
2705 "source",
2706 "destination",
2706 "destination",
2707 "nb-revs",
2707 "nb-revs",
2708 "nb-files",
2708 "nb-files",
2709 "nb-renames",
2709 "nb-renames",
2710 "time",
2710 "time",
2711 )
2711 )
2712 fm.plain(header % header_names)
2712 fm.plain(header % header_names)
2713 else:
2713 else:
2714 header = '%12s %12s %12s %12s\n'
2714 header = '%12s %12s %12s %12s\n'
2715 output = (
2715 output = (
2716 "%(source)12s %(destination)12s "
2716 "%(source)12s %(destination)12s "
2717 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2717 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2718 )
2718 )
2719 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2719 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2720
2720
2721 if not revs:
2721 if not revs:
2722 revs = ['all()']
2722 revs = ['all()']
2723 revs = scmutil.revrange(repo, revs)
2723 revs = scmutil.revrange(repo, revs)
2724
2724
2725 if dostats:
2725 if dostats:
2726 alldata = {
2726 alldata = {
2727 'nbrevs': [],
2727 'nbrevs': [],
2728 'nbmissingfiles': [],
2728 'nbmissingfiles': [],
2729 }
2729 }
2730 if dotiming:
2730 if dotiming:
2731 alldata['nbrenames'] = []
2731 alldata['nbrenames'] = []
2732 alldata['time'] = []
2732 alldata['time'] = []
2733
2733
2734 roi = repo.revs('merge() and %ld', revs)
2734 roi = repo.revs('merge() and %ld', revs)
2735 for r in roi:
2735 for r in roi:
2736 ctx = repo[r]
2736 ctx = repo[r]
2737 p1 = ctx.p1().rev()
2737 p1 = ctx.p1().rev()
2738 p2 = ctx.p2().rev()
2738 p2 = ctx.p2().rev()
2739 bases = repo.changelog._commonancestorsheads(p1, p2)
2739 bases = repo.changelog._commonancestorsheads(p1, p2)
2740 for p in (p1, p2):
2740 for p in (p1, p2):
2741 for b in bases:
2741 for b in bases:
2742 base = repo[b]
2742 base = repo[b]
2743 parent = repo[p]
2743 parent = repo[p]
2744 missing = copies._computeforwardmissing(base, parent)
2744 missing = copies._computeforwardmissing(base, parent)
2745 if not missing:
2745 if not missing:
2746 continue
2746 continue
2747 data = {
2747 data = {
2748 b'source': base.hex(),
2748 b'source': base.hex(),
2749 b'destination': parent.hex(),
2749 b'destination': parent.hex(),
2750 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2750 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2751 b'nbmissingfiles': len(missing),
2751 b'nbmissingfiles': len(missing),
2752 }
2752 }
2753 if dostats:
2753 if dostats:
2754 alldata['nbrevs'].append(
2754 alldata['nbrevs'].append(
2755 (
2755 (
2756 data['nbrevs'],
2756 data['nbrevs'],
2757 base.hex(),
2757 base.hex(),
2758 parent.hex(),
2758 parent.hex(),
2759 )
2759 )
2760 )
2760 )
2761 alldata['nbmissingfiles'].append(
2761 alldata['nbmissingfiles'].append(
2762 (
2762 (
2763 data['nbmissingfiles'],
2763 data['nbmissingfiles'],
2764 base.hex(),
2764 base.hex(),
2765 parent.hex(),
2765 parent.hex(),
2766 )
2766 )
2767 )
2767 )
2768 if dotiming:
2768 if dotiming:
2769 begin = util.timer()
2769 begin = util.timer()
2770 renames = copies.pathcopies(base, parent)
2770 renames = copies.pathcopies(base, parent)
2771 end = util.timer()
2771 end = util.timer()
2772 # not very stable timing since we did only one run
2772 # not very stable timing since we did only one run
2773 data['time'] = end - begin
2773 data['time'] = end - begin
2774 data['nbrenamedfiles'] = len(renames)
2774 data['nbrenamedfiles'] = len(renames)
2775 if dostats:
2775 if dostats:
2776 alldata['time'].append(
2776 alldata['time'].append(
2777 (
2777 (
2778 data['time'],
2778 data['time'],
2779 base.hex(),
2779 base.hex(),
2780 parent.hex(),
2780 parent.hex(),
2781 )
2781 )
2782 )
2782 )
2783 alldata['nbrenames'].append(
2783 alldata['nbrenames'].append(
2784 (
2784 (
2785 data['nbrenamedfiles'],
2785 data['nbrenamedfiles'],
2786 base.hex(),
2786 base.hex(),
2787 parent.hex(),
2787 parent.hex(),
2788 )
2788 )
2789 )
2789 )
2790 fm.startitem()
2790 fm.startitem()
2791 fm.data(**data)
2791 fm.data(**data)
2792 out = data.copy()
2792 out = data.copy()
2793 out['source'] = fm.hexfunc(base.node())
2793 out['source'] = fm.hexfunc(base.node())
2794 out['destination'] = fm.hexfunc(parent.node())
2794 out['destination'] = fm.hexfunc(parent.node())
2795 fm.plain(output % out)
2795 fm.plain(output % out)
2796
2796
2797 fm.end()
2797 fm.end()
2798 if dostats:
2798 if dostats:
2799 entries = [
2799 entries = [
2800 ('nbrevs', 'number of revisions covered'),
2800 ('nbrevs', 'number of revisions covered'),
2801 ('nbmissingfiles', 'number of missing files at head'),
2801 ('nbmissingfiles', 'number of missing files at head'),
2802 ]
2802 ]
2803 if dotiming:
2803 if dotiming:
2804 entries.append(('nbrenames', 'renamed files'))
2804 entries.append(('nbrenames', 'renamed files'))
2805 entries.append(('time', 'time'))
2805 entries.append(('time', 'time'))
2806 _displaystats(ui, opts, entries, alldata)
2806 _displaystats(ui, opts, entries, alldata)
2807
2807
2808
2808
2809 @command(b'perf::cca|perfcca', formatteropts)
2809 @command(b'perf::cca|perfcca', formatteropts)
2810 def perfcca(ui, repo, **opts):
2810 def perfcca(ui, repo, **opts):
2811 opts = _byteskwargs(opts)
2811 opts = _byteskwargs(opts)
2812 timer, fm = gettimer(ui, opts)
2812 timer, fm = gettimer(ui, opts)
2813 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2813 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2814 fm.end()
2814 fm.end()
2815
2815
2816
2816
2817 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2817 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2818 def perffncacheload(ui, repo, **opts):
2818 def perffncacheload(ui, repo, **opts):
2819 opts = _byteskwargs(opts)
2819 opts = _byteskwargs(opts)
2820 timer, fm = gettimer(ui, opts)
2820 timer, fm = gettimer(ui, opts)
2821 s = repo.store
2821 s = repo.store
2822
2822
2823 def d():
2823 def d():
2824 s.fncache._load()
2824 s.fncache._load()
2825
2825
2826 timer(d)
2826 timer(d)
2827 fm.end()
2827 fm.end()
2828
2828
2829
2829
2830 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2830 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2831 def perffncachewrite(ui, repo, **opts):
2831 def perffncachewrite(ui, repo, **opts):
2832 opts = _byteskwargs(opts)
2832 opts = _byteskwargs(opts)
2833 timer, fm = gettimer(ui, opts)
2833 timer, fm = gettimer(ui, opts)
2834 s = repo.store
2834 s = repo.store
2835 lock = repo.lock()
2835 lock = repo.lock()
2836 s.fncache._load()
2836 s.fncache._load()
2837 tr = repo.transaction(b'perffncachewrite')
2837 tr = repo.transaction(b'perffncachewrite')
2838 tr.addbackup(b'fncache')
2838 tr.addbackup(b'fncache')
2839
2839
2840 def d():
2840 def d():
2841 s.fncache._dirty = True
2841 s.fncache._dirty = True
2842 s.fncache.write(tr)
2842 s.fncache.write(tr)
2843
2843
2844 timer(d)
2844 timer(d)
2845 tr.close()
2845 tr.close()
2846 lock.release()
2846 lock.release()
2847 fm.end()
2847 fm.end()
2848
2848
2849
2849
2850 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2850 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2851 def perffncacheencode(ui, repo, **opts):
2851 def perffncacheencode(ui, repo, **opts):
2852 opts = _byteskwargs(opts)
2852 opts = _byteskwargs(opts)
2853 timer, fm = gettimer(ui, opts)
2853 timer, fm = gettimer(ui, opts)
2854 s = repo.store
2854 s = repo.store
2855 s.fncache._load()
2855 s.fncache._load()
2856
2856
2857 def d():
2857 def d():
2858 for p in s.fncache.entries:
2858 for p in s.fncache.entries:
2859 s.encode(p)
2859 s.encode(p)
2860
2860
2861 timer(d)
2861 timer(d)
2862 fm.end()
2862 fm.end()
2863
2863
2864
2864
2865 def _bdiffworker(q, blocks, xdiff, ready, done):
2865 def _bdiffworker(q, blocks, xdiff, ready, done):
2866 while not done.is_set():
2866 while not done.is_set():
2867 pair = q.get()
2867 pair = q.get()
2868 while pair is not None:
2868 while pair is not None:
2869 if xdiff:
2869 if xdiff:
2870 mdiff.bdiff.xdiffblocks(*pair)
2870 mdiff.bdiff.xdiffblocks(*pair)
2871 elif blocks:
2871 elif blocks:
2872 mdiff.bdiff.blocks(*pair)
2872 mdiff.bdiff.blocks(*pair)
2873 else:
2873 else:
2874 mdiff.textdiff(*pair)
2874 mdiff.textdiff(*pair)
2875 q.task_done()
2875 q.task_done()
2876 pair = q.get()
2876 pair = q.get()
2877 q.task_done() # for the None one
2877 q.task_done() # for the None one
2878 with ready:
2878 with ready:
2879 ready.wait()
2879 ready.wait()
2880
2880
2881
2881
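# _bdiffworker above consumes pairs until it reads a None sentinel and
# acknowledges every item with q.task_done() (the sentinel included) so
# that q.join() can return; it then parks on the `ready` condition until
# the main thread wakes it. The sentinel-driven core of such a worker, as
# a standalone sketch with hypothetical names:
def _example_sentinel_worker(q, handle):
    while True:
        item = q.get()
        if item is None:
            q.task_done()
            break
        handle(item)
        q.task_done()
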
2882 def _manifestrevision(repo, mnode):
2882 def _manifestrevision(repo, mnode):
2883 ml = repo.manifestlog
2883 ml = repo.manifestlog
2884
2884
2885 if util.safehasattr(ml, b'getstorage'):
2885 if util.safehasattr(ml, b'getstorage'):
2886 store = ml.getstorage(b'')
2886 store = ml.getstorage(b'')
2887 else:
2887 else:
2888 store = ml._revlog
2888 store = ml._revlog
2889
2889
2890 return store.revision(mnode)
2890 return store.revision(mnode)
2891
2891
2892
2892
2893 @command(
2893 @command(
2894 b'perf::bdiff|perfbdiff',
2894 b'perf::bdiff|perfbdiff',
2895 revlogopts
2895 revlogopts
2896 + formatteropts
2896 + formatteropts
2897 + [
2897 + [
2898 (
2898 (
2899 b'',
2899 b'',
2900 b'count',
2900 b'count',
2901 1,
2901 1,
2902 b'number of revisions to test (when using --startrev)',
2902 b'number of revisions to test (when using --startrev)',
2903 ),
2903 ),
2904 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2904 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2905 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2905 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2906 (b'', b'blocks', False, b'test computing diffs into blocks'),
2906 (b'', b'blocks', False, b'test computing diffs into blocks'),
2907 (b'', b'xdiff', False, b'use xdiff algorithm'),
2907 (b'', b'xdiff', False, b'use xdiff algorithm'),
2908 ],
2908 ],
2909 b'-c|-m|FILE REV',
2909 b'-c|-m|FILE REV',
2910 )
2910 )
2911 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2911 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2912 """benchmark a bdiff between revisions
2912 """benchmark a bdiff between revisions
2913
2913
2914 By default, benchmark a bdiff between its delta parent and itself.
2914 By default, benchmark a bdiff between its delta parent and itself.
2915
2915
2916 With ``--count``, benchmark bdiffs between delta parents and self for N
2916 With ``--count``, benchmark bdiffs between delta parents and self for N
2917 revisions starting at the specified revision.
2917 revisions starting at the specified revision.
2918
2918
2919 With ``--alldata``, assume the requested revision is a changeset and
2919 With ``--alldata``, assume the requested revision is a changeset and
2920 measure bdiffs for all changes related to that changeset (manifest
2920 measure bdiffs for all changes related to that changeset (manifest
2921 and filelogs).
2921 and filelogs).
2922 """
2922 """
2923 opts = _byteskwargs(opts)
2923 opts = _byteskwargs(opts)
2924
2924
2925 if opts[b'xdiff'] and not opts[b'blocks']:
2925 if opts[b'xdiff'] and not opts[b'blocks']:
2926 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2926 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2927
2927
2928 if opts[b'alldata']:
2928 if opts[b'alldata']:
2929 opts[b'changelog'] = True
2929 opts[b'changelog'] = True
2930
2930
2931 if opts.get(b'changelog') or opts.get(b'manifest'):
2931 if opts.get(b'changelog') or opts.get(b'manifest'):
2932 file_, rev = None, file_
2932 file_, rev = None, file_
2933 elif rev is None:
2933 elif rev is None:
2934 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2934 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2935
2935
2936 blocks = opts[b'blocks']
2936 blocks = opts[b'blocks']
2937 xdiff = opts[b'xdiff']
2937 xdiff = opts[b'xdiff']
2938 textpairs = []
2938 textpairs = []
2939
2939
2940 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2940 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2941
2941
2942 startrev = r.rev(r.lookup(rev))
2942 startrev = r.rev(r.lookup(rev))
2943 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2943 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2944 if opts[b'alldata']:
2944 if opts[b'alldata']:
2945 # Load revisions associated with changeset.
2945 # Load revisions associated with changeset.
2946 ctx = repo[rev]
2946 ctx = repo[rev]
2947 mtext = _manifestrevision(repo, ctx.manifestnode())
2947 mtext = _manifestrevision(repo, ctx.manifestnode())
2948 for pctx in ctx.parents():
2948 for pctx in ctx.parents():
2949 pman = _manifestrevision(repo, pctx.manifestnode())
2949 pman = _manifestrevision(repo, pctx.manifestnode())
2950 textpairs.append((pman, mtext))
2950 textpairs.append((pman, mtext))
2951
2951
2952 # Load filelog revisions by iterating manifest delta.
2952 # Load filelog revisions by iterating manifest delta.
2953 man = ctx.manifest()
2953 man = ctx.manifest()
2954 pman = ctx.p1().manifest()
2954 pman = ctx.p1().manifest()
2955 for filename, change in pman.diff(man).items():
2955 for filename, change in pman.diff(man).items():
2956 fctx = repo.file(filename)
2956 fctx = repo.file(filename)
2957 f1 = fctx.revision(change[0][0] or -1)
2957 f1 = fctx.revision(change[0][0] or -1)
2958 f2 = fctx.revision(change[1][0] or -1)
2958 f2 = fctx.revision(change[1][0] or -1)
2959 textpairs.append((f1, f2))
2959 textpairs.append((f1, f2))
2960 else:
2960 else:
2961 dp = r.deltaparent(rev)
2961 dp = r.deltaparent(rev)
2962 textpairs.append((r.revision(dp), r.revision(rev)))
2962 textpairs.append((r.revision(dp), r.revision(rev)))
2963
2963
2964 withthreads = threads > 0
2964 withthreads = threads > 0
2965 if not withthreads:
2965 if not withthreads:
2966
2966
2967 def d():
2967 def d():
2968 for pair in textpairs:
2968 for pair in textpairs:
2969 if xdiff:
2969 if xdiff:
2970 mdiff.bdiff.xdiffblocks(*pair)
2970 mdiff.bdiff.xdiffblocks(*pair)
2971 elif blocks:
2971 elif blocks:
2972 mdiff.bdiff.blocks(*pair)
2972 mdiff.bdiff.blocks(*pair)
2973 else:
2973 else:
2974 mdiff.textdiff(*pair)
2974 mdiff.textdiff(*pair)
2975
2975
2976 else:
2976 else:
2977 q = queue()
2977 q = queue()
2978 for i in _xrange(threads):
2978 for i in _xrange(threads):
2979 q.put(None)
2979 q.put(None)
2980 ready = threading.Condition()
2980 ready = threading.Condition()
2981 done = threading.Event()
2981 done = threading.Event()
2982 for i in _xrange(threads):
2982 for i in _xrange(threads):
2983 threading.Thread(
2983 threading.Thread(
2984 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2984 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2985 ).start()
2985 ).start()
2986 q.join()
2986 q.join()
2987
2987
2988 def d():
2988 def d():
2989 for pair in textpairs:
2989 for pair in textpairs:
2990 q.put(pair)
2990 q.put(pair)
2991 for i in _xrange(threads):
2991 for i in _xrange(threads):
2992 q.put(None)
2992 q.put(None)
2993 with ready:
2993 with ready:
2994 ready.notify_all()
2994 ready.notify_all()
2995 q.join()
2995 q.join()
2996
2996
2997 timer, fm = gettimer(ui, opts)
2997 timer, fm = gettimer(ui, opts)
2998 timer(d)
2998 timer(d)
2999 fm.end()
2999 fm.end()
3000
3000
3001 if withthreads:
3001 if withthreads:
3002 done.set()
3002 done.set()
3003 for i in _xrange(threads):
3003 for i in _xrange(threads):
3004 q.put(None)
3004 q.put(None)
3005 with ready:
3005 with ready:
3006 ready.notify_all()
3006 ready.notify_all()
3007
3007
3008
3008
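# perfbdiff gathers all text pairs before timing, so revision loading is
# excluded from the measurement; in the default mode each revision is
# paired with its delta parent. That preparation step as a standalone
# sketch (hypothetical helper over a revlog-like object):
def _example_collect_delta_pairs(rl, startrev, count):
    pairs = []
    for rev in range(startrev, startrev + count):
        dp = rl.deltaparent(rev)
        pairs.append((rl.revision(dp), rl.revision(rev)))
    return pairs
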
3009 @command(
3009 @command(
3010 b'perf::unbundle',
3010 b'perf::unbundle',
3011 [
3011 [
3012 (b'', b'as-push', None, b'pretend the bundle comes from a push'),
3012 (b'', b'as-push', None, b'pretend the bundle comes from a push'),
3013 ]
3013 ]
3014 + formatteropts,
3014 + formatteropts,
3015 b'BUNDLE_FILE',
3015 b'BUNDLE_FILE',
3016 )
3016 )
3017 def perf_unbundle(ui, repo, fname, **opts):
3017 def perf_unbundle(ui, repo, fname, **opts):
3018 """benchmark application of a bundle in a repository.
3018 """benchmark application of a bundle in a repository.
3019
3019
3020 This does not include the final transaction processing
3020 This does not include the final transaction processing
3021
3021
3022 The --as-push option makes the unbundle operation appear as if it comes from
3022 The --as-push option makes the unbundle operation appear as if it comes from
3023 a client push. It changes some aspects of the processing and the associated
3023 a client push. It changes some aspects of the processing and the associated
3024 performance profile.
3024 performance profile.
3025 """
3025 """
3026
3026
3027 from mercurial import exchange
3027 from mercurial import exchange
3028 from mercurial import bundle2
3028 from mercurial import bundle2
3029 from mercurial import transaction
3029 from mercurial import transaction
3030
3030
3031 opts = _byteskwargs(opts)
3031 opts = _byteskwargs(opts)
3032
3032
3033 ### some compatibility hotfix
3033 ### some compatibility hotfix
3034 #
3034 #
3035 # the data attribute is dropped in 63edc384d3b7, a changeset introducing a
3035 # the data attribute is dropped in 63edc384d3b7, a changeset introducing a
3036 # critical regression that breaks transaction rollback for files that are
3036 # critical regression that breaks transaction rollback for files that are
3037 # de-inlined.
3037 # de-inlined.
3038 method = transaction.transaction._addentry
3038 method = transaction.transaction._addentry
3039 pre_63edc384d3b7 = "data" in getargspec(method).args
3039 pre_63edc384d3b7 = "data" in getargspec(method).args
3040 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
3040 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
3041 # a changeset that is a close descendant of 18415fc918a1, the changeset
3041 # a changeset that is a close descendant of 18415fc918a1, the changeset
3042 # that concludes the fix run for the bug introduced in 63edc384d3b7.
3042 # that concludes the fix run for the bug introduced in 63edc384d3b7.
3043 args = getargspec(error.Abort.__init__).args
3043 args = getargspec(error.Abort.__init__).args
3044 post_18415fc918a1 = "detailed_exit_code" in args
3044 post_18415fc918a1 = "detailed_exit_code" in args
3045
3045
3046 unbundle_source = b'perf::unbundle'
3046 unbundle_source = b'perf::unbundle'
3047 if opts[b'as_push']:
3047 if opts[b'as_push']:
3048 unbundle_source = b'push'
3048 unbundle_source = b'push'
3049
3049
3050 old_max_inline = None
3050 old_max_inline = None
3051 try:
3051 try:
3052 if not (pre_63edc384d3b7 or post_18415fc918a1):
3052 if not (pre_63edc384d3b7 or post_18415fc918a1):
3053 # disable inlining
3053 # disable inlining
3054 old_max_inline = mercurial.revlog._maxinline
3054 old_max_inline = mercurial.revlog._maxinline
3055 # large enough to never happen
3055 # large enough to never happen
3056 mercurial.revlog._maxinline = 2 ** 50
3056 mercurial.revlog._maxinline = 2 ** 50
3057
3057
3058 with repo.lock():
3058 with repo.lock():
3059 bundle = [None, None]
3059 bundle = [None, None]
3060 orig_quiet = repo.ui.quiet
3060 orig_quiet = repo.ui.quiet
3061 try:
3061 try:
3062 repo.ui.quiet = True
3062 repo.ui.quiet = True
3063 with open(fname, mode="rb") as f:
3063 with open(fname, mode="rb") as f:
3064
3064
3065 def noop_report(*args, **kwargs):
3065 def noop_report(*args, **kwargs):
3066 pass
3066 pass
3067
3067
3068 def setup():
3068 def setup():
3069 gen, tr = bundle
3069 gen, tr = bundle
3070 if tr is not None:
3070 if tr is not None:
3071 tr.abort()
3071 tr.abort()
3072 bundle[:] = [None, None]
3072 bundle[:] = [None, None]
3073 f.seek(0)
3073 f.seek(0)
3074 bundle[0] = exchange.readbundle(ui, f, fname)
3074 bundle[0] = exchange.readbundle(ui, f, fname)
3075 bundle[1] = repo.transaction(b'perf::unbundle')
3075 bundle[1] = repo.transaction(b'perf::unbundle')
3076 # silence the transaction
3076 # silence the transaction
3077 bundle[1]._report = noop_report
3077 bundle[1]._report = noop_report
3078
3078
3079 def apply():
3079 def apply():
3080 gen, tr = bundle
3080 gen, tr = bundle
3081 bundle2.applybundle(
3081 bundle2.applybundle(
3082 repo,
3082 repo,
3083 gen,
3083 gen,
3084 tr,
3084 tr,
3085 source=unbundle_source,
3085 source=unbundle_source,
3086 url=fname,
3086 url=fname,
3087 )
3087 )
3088
3088
3089 timer, fm = gettimer(ui, opts)
3089 timer, fm = gettimer(ui, opts)
3090 timer(apply, setup=setup)
3090 timer(apply, setup=setup)
3091 fm.end()
3091 fm.end()
3092 finally:
3092 finally:
3093 repo.ui.quiet = orig_quiet
3093 repo.ui.quiet = orig_quiet
3094 gen, tr = bundle
3094 gen, tr = bundle
3095 if tr is not None:
3095 if tr is not None:
3096 tr.abort()
3096 tr.abort()
3097 finally:
3097 finally:
3098 if old_max_inline is not None:
3098 if old_max_inline is not None:
3099 mercurial.revlog._maxinline = old_max_inline
3099 mercurial.revlog._maxinline = old_max_inline
3100
3100
3101
3101
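# perf::unbundle feature-detects historical Mercurial behaviour by probing
# function signatures for argument names (via getargspec) rather than by
# comparing version numbers. The same probe against the modern inspect
# API, as a hypothetical standalone helper:
import inspect

def _example_has_argument(func, name):
    try:
        return name in inspect.signature(func).parameters
    except (TypeError, ValueError):
        return False
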
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between the requested revision and its
    delta parent.

    With ``--count``, benchmark diffs between each revision and its delta
    parent for N revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()


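# For reference, a minimal stdlib analogue of the unified-diff work measured
# above, using difflib in place of mercurial.mdiff (output format differs,
# and difflib operates on str rather than bytes):
def _example_stdlib_unidiff(left, right):
    import difflib

    return ''.join(
        difflib.unified_diff(
            left.splitlines(keepends=True),
            right.splitlines(keepends=True),
            fromfile='left',
            tofile='right',
        )
    )

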
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = {options[c]: b'1' for c in diffopt}

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()


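# The loop above expands each short diff flag string into a keyword-argument
# dict; the same mapping technique in isolation (a hypothetical helper, not
# used by perf.py):
def _example_expand_flags(flagstr):
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }
    # 'wB' -> {'ignore_all_space': b'1', 'ignore_blank_lines': b'1'}
    return {options[c]: b'1' for c in flagstr}

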
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

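    # Sketch of the header decoding above on a synthetic header: the low 16
    # bits of the first 4 bytes carry the version, bit 16 the inline flag
    # (an assumption based on the checks above, not a full format spec):
    def _example_decode_revlog_header():
        import struct

        raw = struct.pack(b'>I', (1 << 16) | 1)  # inline flag + version 1
        hdr = struct.unpack(b'>I', raw[0:4])[0]
        return hdr & 0xFFFF, bool(hdr & (1 << 16))  # -> (1, True)
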
    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()


@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revision tested'),
        (b'', b'source', b'full', b'the kind of data fed into the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled, use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

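    # Self-contained illustration of the two statistics tricks used above: a
    # median sort keyed on the middle element of each timing list, and pure
    # integer arithmetic for percentile indexes (no floats involved):
    def _example_stats(results, count):
        by_median = sorted(results, key=lambda x: sorted(x[1])[len(x[1]) // 2])
        p99_index = count * 99 // 100  # e.g. count=200 -> index 198
        return by_median, p99_index
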
    # XXX summing that many floats will not be very precise, we ignore this
    # fact for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()


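# The XXX above notes that naively summing many floats loses precision;
# math.fsum is the usual exact-summation alternative should that ever matter
# (a hedged aside, not something perf.py currently does):
def _example_precise_total(times):
    import math

    return math.fsum(times)

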
class _faketr:
    def add(s, x, y, z=None):
        return None


def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        if hasattr(dest, "delta_config"):
            dest.delta_config.lazy_delta_base = lazydeltabase
        else:
            dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings


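# The progress handling in _timeonewrite adapts to two UI generations by
# feature detection; the same adapter shape in miniature (the `ui` duck type
# here is hypothetical):
def _example_progress_adapter(ui, topic, total):
    if hasattr(ui, 'makeprogress'):
        progress = ui.makeprogress(topic, unit='revs', total=total)
        return progress.update, progress.complete

    def update(pos):
        ui.progress(topic, pos, unit='revs', total=total)

    def complete():
        ui.progress(topic, None, unit='revs', total=total)

    return update, complete

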
def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )


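# _getrevisionseed returns an (args, kwargs) pair that the caller later
# splats into addrawrevision; a tiny standalone illustration of that
# call-seeding pattern (`target` is made up):
def _example_call_seed():
    def target(a, b, flag=None):
        return (a, b, flag)

    addargs, addkwargs = (1, 2), {'flag': True}
    return target(*addargs, **addkwargs)  # -> (1, 2, True)

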
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('instantiating revlog from the truncated copy\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)


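# Minimal standalone sketch of the copy-then-truncate pattern _temprevlog is
# built around: duplicate a file into a temporary directory, drop its tail,
# and clean up afterwards (stdlib only, no revlog specifics):
@contextlib.contextmanager
def _example_truncated_copy(path, keep_bytes):
    import os
    import shutil
    import tempfile

    tmpdir = tempfile.mkdtemp(prefix='tmp-example-')
    try:
        dest = os.path.join(tmpdir, os.path.basename(path))
        shutil.copyfile(path, dest)
        with open(dest, 'ab') as fh:
            fh.truncate(keep_bytes)
        yield dest
    finally:
        shutil.rmtree(tmpdir, True)

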
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # - _chunkraw was renamed to _getsegmentforrevs
    # - _getsegmentforrevs was moved on the inner object
    try:
        segmentforrevs = rl._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = rl._getsegmentforrevs
        except AttributeError:
            segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

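    # The engine discovery above keeps only engines that survive a dummy
    # compression; the same probe-and-filter shape over stdlib codecs (an
    # illustration, unrelated to Mercurial's engine registry):
    def _example_probe_engines():
        usable = []
        for name in ('zlib', 'bz2', 'lzma'):
            try:
                mod = __import__(name)
                mod.compress(b'dummy')
                usable.append(name)
            except (ImportError, NotImplementedError):
                pass
        return usable
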
    revs = list(rl.revs(startrev, len(rl) - 1))

    @contextlib.contextmanager
    def reading(rl):
        if getattr(rl, 'reading', None) is not None:
            with rl.reading():
                yield None
        elif rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            yield getsvfs(repo)(indexfile)
        else:
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            yield getsvfs(repo)(datafile)

    if getattr(rl, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(rl):
            with rl.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(rl):
            yield

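    # When a no-op stand-in for a context manager is all that is needed (as
    # in the fallback lazy_reading above), contextlib.nullcontext offers the
    # same behaviour on Python 3.7+ (a hedged aside, not a perf.py change):
    def _example_reading_ctx(rl):
        if getattr(rl, 'reading', None) is not None:
            return rl.reading()
        return contextlib.nullcontext()
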
    def doread():
        rl.clearcaches()
        for rev in revs:
            with lazy_reading(rl):
                segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    segmentforrevs(rev, rev, df=fh)
            else:
                for rev in revs:
                    segmentforrevs(rev, rev)

    def doreadbatch():
        rl.clearcaches()
        with lazy_reading(rl):
            segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                segmentforrevs(revs[0], revs[-1], df=fh)
            else:
                segmentforrevs(revs[0], revs[-1])

    def dochunk():
        rl.clearcaches()
        # chunk used to be available directly on the revlog
        _chunk = getattr(rl, '_inner', rl)._chunk
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    _chunk(rev, df=fh)
            else:
                for rev in revs:
                    _chunk(rev)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        _chunks = getattr(rl, '_inner', rl)._chunks
        with reading(rl) as fh:
            if fh is not None:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs, df=fh)
            else:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs)

    def docompress(compressor):
        rl.clearcaches()

        compressor_holder = getattr(rl, '_inner', rl)

        try:
            # Swap in the requested compression engine.
            oldcompressor = compressor_holder._compressor
            compressor_holder._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            compressor_holder._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = r._getsegmentforrevs
        except AttributeError:
            segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    if getattr(r, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(r):
            with r.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(r):
            yield

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks
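
    # Standalone illustration of the zero-copy slicing done by getrawchunks,
    # with memoryview standing in for util.buffer:
    def _example_slice_segment(segment, offset, chunkstart, chunklength):
        view = memoryview(segment)
        return view[chunkstart - offset:chunkstart - offset + chunklength]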

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            with lazy_reading(r):
                segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]

    with_sparse_read = False
    if hasattr(r, 'data_config'):
        with_sparse_read = r.data_config.with_sparse_read
    elif hasattr(r, '_withsparseread'):
        with_sparse_read = r._withsparseread
    if with_sparse_read:
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._inner._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if with_sparse_read:
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()


@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on revset execution. The volatile caches hold
    filtered-revision and obsolescence-related data."""
4071 opts = _byteskwargs(opts)
4071 opts = _byteskwargs(opts)
4072
4072
4073 timer, fm = gettimer(ui, opts)
4073 timer, fm = gettimer(ui, opts)
4074
4074
4075 def d():
4075 def d():
4076 if clear:
4076 if clear:
4077 repo.invalidatevolatilesets()
4077 repo.invalidatevolatilesets()
4078 if contexts:
4078 if contexts:
4079 for ctx in repo.set(expr):
4079 for ctx in repo.set(expr):
4080 pass
4080 pass
4081 else:
4081 else:
4082 for r in repo.revs(expr):
4082 for r in repo.revs(expr):
4083 pass
4083 pass
4084
4084
4085 timer(d)
4085 timer(d)
4086 fm.end()
4086 fm.end()
4087
4087
4088
4088
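A couple of illustrative invocations (the revset expression here is only an example):

    $ hg perf::revset 'heads(all())'
    $ hg perf::revset --clear 'heads(all())'
    $ hg perf::revset --contexts 'heads(all())'

The --clear run drops the volatile caches before every evaluation, so its timing includes the cost of rebuilding them; --contexts materializes a changectx per matched revision instead of iterating over bare revision numbers.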
4089 @command(
4089 @command(
4090 b'perf::volatilesets|perfvolatilesets',
4090 b'perf::volatilesets|perfvolatilesets',
4091 [
4091 [
4092 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4092 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4093 ]
4093 ]
4094 + formatteropts,
4094 + formatteropts,
4095 )
4095 )
4096 def perfvolatilesets(ui, repo, *names, **opts):
4096 def perfvolatilesets(ui, repo, *names, **opts):
4097 """benchmark the computation of various volatile sets
4097 """benchmark the computation of various volatile sets
4098
4098
4099 Volatile sets compute elements related to filtering and obsolescence."""
4099 Volatile sets compute elements related to filtering and obsolescence."""
4100 opts = _byteskwargs(opts)
4100 opts = _byteskwargs(opts)
4101 timer, fm = gettimer(ui, opts)
4101 timer, fm = gettimer(ui, opts)
4102 repo = repo.unfiltered()
4102 repo = repo.unfiltered()
4103
4103
4104 def getobs(name):
4104 def getobs(name):
4105 def d():
4105 def d():
4106 repo.invalidatevolatilesets()
4106 repo.invalidatevolatilesets()
4107 if opts[b'clear_obsstore']:
4107 if opts[b'clear_obsstore']:
4108 clearfilecache(repo, b'obsstore')
4108 clearfilecache(repo, b'obsstore')
4109 obsolete.getrevs(repo, name)
4109 obsolete.getrevs(repo, name)
4110
4110
4111 return d
4111 return d
4112
4112
4113 allobs = sorted(obsolete.cachefuncs)
4113 allobs = sorted(obsolete.cachefuncs)
4114 if names:
4114 if names:
4115 allobs = [n for n in allobs if n in names]
4115 allobs = [n for n in allobs if n in names]
4116
4116
4117 for name in allobs:
4117 for name in allobs:
4118 timer(getobs(name), title=name)
4118 timer(getobs(name), title=name)
4119
4119
4120 def getfiltered(name):
4120 def getfiltered(name):
4121 def d():
4121 def d():
4122 repo.invalidatevolatilesets()
4122 repo.invalidatevolatilesets()
4123 if opts[b'clear_obsstore']:
4123 if opts[b'clear_obsstore']:
4124 clearfilecache(repo, b'obsstore')
4124 clearfilecache(repo, b'obsstore')
4125 repoview.filterrevs(repo, name)
4125 repoview.filterrevs(repo, name)
4126
4126
4127 return d
4127 return d
4128
4128
4129 allfilter = sorted(repoview.filtertable)
4129 allfilter = sorted(repoview.filtertable)
4130 if names:
4130 if names:
4131 allfilter = [n for n in allfilter if n in names]
4131 allfilter = [n for n in allfilter if n in names]
4132
4132
4133 for name in allfilter:
4133 for name in allfilter:
4134 timer(getfiltered(name), title=name)
4134 timer(getfiltered(name), title=name)
4135 fm.end()
4135 fm.end()
4136
4136
4137
4137
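As a usage sketch, positional names restrict which sets are timed; valid names are the keys of obsolete.cachefuncs and repoview.filtertable ('obsolete' and 'visible' below are common examples, assumed present in the running version):

    $ hg perf::volatilesets
    $ hg perf::volatilesets obsolete visible --clear-obsstore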
4138 @command(
4138 @command(
4139 b'perf::branchmap|perfbranchmap',
4139 b'perf::branchmap|perfbranchmap',
4140 [
4140 [
4141 (b'f', b'full', False, b'Includes build time of subset'),
4141 (b'f', b'full', False, b'Includes build time of subset'),
4142 (
4142 (
4143 b'',
4143 b'',
4144 b'clear-revbranch',
4144 b'clear-revbranch',
4145 False,
4145 False,
4146 b'purge the revbranch cache between computation',
4146 b'purge the revbranch cache between computation',
4147 ),
4147 ),
4148 ]
4148 ]
4149 + formatteropts,
4149 + formatteropts,
4150 )
4150 )
4151 def perfbranchmap(ui, repo, *filternames, **opts):
4151 def perfbranchmap(ui, repo, *filternames, **opts):
4152 """benchmark the update of a branchmap
4152 """benchmark the update of a branchmap
4153
4153
4154 This benchmarks the full repo.branchmap() call with read and write disabled
4154 This benchmarks the full repo.branchmap() call with read and write disabled
4155 """
4155 """
4156 opts = _byteskwargs(opts)
4156 opts = _byteskwargs(opts)
4157 full = opts.get(b"full", False)
4157 full = opts.get(b"full", False)
4158 clear_revbranch = opts.get(b"clear_revbranch", False)
4158 clear_revbranch = opts.get(b"clear_revbranch", False)
4159 timer, fm = gettimer(ui, opts)
4159 timer, fm = gettimer(ui, opts)
4160
4160
4161 def getbranchmap(filtername):
4161 def getbranchmap(filtername):
4162 """generate a benchmark function for the filtername"""
4162 """generate a benchmark function for the filtername"""
4163 if filtername is None:
4163 if filtername is None:
4164 view = repo
4164 view = repo
4165 else:
4165 else:
4166 view = repo.filtered(filtername)
4166 view = repo.filtered(filtername)
4167 if util.safehasattr(view._branchcaches, '_per_filter'):
4167 if util.safehasattr(view._branchcaches, '_per_filter'):
4168 filtered = view._branchcaches._per_filter
4168 filtered = view._branchcaches._per_filter
4169 else:
4169 else:
4170 # older versions
4170 # older versions
4171 filtered = view._branchcaches
4171 filtered = view._branchcaches
4172
4172
4173 def d():
4173 def d():
4174 if clear_revbranch:
4174 if clear_revbranch:
4175 repo.revbranchcache()._clear()
4175 repo.revbranchcache()._clear()
4176 if full:
4176 if full:
4177 view._branchcaches.clear()
4177 view._branchcaches.clear()
4178 else:
4178 else:
4179 filtered.pop(filtername, None)
4179 filtered.pop(filtername, None)
4180 view.branchmap()
4180 view.branchmap()
4181
4181
4182 return d
4182 return d
4183
4183
4184 # add filter in smaller subset to bigger subset
4184 # add filter in smaller subset to bigger subset
4185 possiblefilters = set(repoview.filtertable)
4185 possiblefilters = set(repoview.filtertable)
4186 if filternames:
4186 if filternames:
4187 possiblefilters &= set(filternames)
4187 possiblefilters &= set(filternames)
4188 subsettable = getbranchmapsubsettable()
4188 subsettable = getbranchmapsubsettable()
4189 allfilters = []
4189 allfilters = []
4190 while possiblefilters:
4190 while possiblefilters:
4191 for name in possiblefilters:
4191 for name in possiblefilters:
4192 subset = subsettable.get(name)
4192 subset = subsettable.get(name)
4193 if subset not in possiblefilters:
4193 if subset not in possiblefilters:
4194 break
4194 break
4195 else:
4195 else:
4196 assert False, b'subset cycle %s!' % possiblefilters
4196 assert False, b'subset cycle %s!' % possiblefilters
4197 allfilters.append(name)
4197 allfilters.append(name)
4198 possiblefilters.remove(name)
4198 possiblefilters.remove(name)
4199
4199
4200 # warm the cache
4200 # warm the cache
4201 if not full:
4201 if not full:
4202 for name in allfilters:
4202 for name in allfilters:
4203 repo.filtered(name).branchmap()
4203 repo.filtered(name).branchmap()
4204 if not filternames or b'unfiltered' in filternames:
4204 if not filternames or b'unfiltered' in filternames:
4205 # add unfiltered
4205 # add unfiltered
4206 allfilters.append(None)
4206 allfilters.append(None)
4207
4207
4208 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4208 old_branch_cache_from_file = None
4209 branchcacheread = None
4210 if util.safehasattr(branchmap, 'branch_cache_from_file'):
4211 old_branch_cache_from_file = branchmap.branch_cache_from_file
4212 branchmap.branch_cache_from_file = lambda *args: None
4213 elif util.safehasattr(branchmap.branchcache, 'fromfile'):
4209 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4214 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4210 branchcacheread.set(classmethod(lambda *args: None))
4215 branchcacheread.set(classmethod(lambda *args: None))
4211 else:
4216 else:
4212 # older versions
4217 # older versions
4213 branchcacheread = safeattrsetter(branchmap, b'read')
4218 branchcacheread = safeattrsetter(branchmap, b'read')
4214 branchcacheread.set(lambda *args: None)
4219 branchcacheread.set(lambda *args: None)
4215 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4220 if util.safehasattr(branchmap, '_LocalBranchCache'):
4216 branchcachewrite.set(lambda *args: None)
4221 branchcachewrite = safeattrsetter(branchmap._LocalBranchCache, b'write')
4222 branchcachewrite.set(lambda *args: None)
4223 else:
4224 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4225 branchcachewrite.set(lambda *args: None)
4217 try:
4226 try:
4218 for name in allfilters:
4227 for name in allfilters:
4219 printname = name
4228 printname = name
4220 if name is None:
4229 if name is None:
4221 printname = b'unfiltered'
4230 printname = b'unfiltered'
4222 timer(getbranchmap(name), title=printname)
4231 timer(getbranchmap(name), title=printname)
4223 finally:
4232 finally:
4224 branchcacheread.restore()
4233 if old_branch_cache_from_file is not None:
4234 branchmap.branch_cache_from_file = old_branch_cache_from_file
4235 if branchcacheread is not None:
4236 branchcacheread.restore()
4225 branchcachewrite.restore()
4237 branchcachewrite.restore()
4226 fm.end()
4238 fm.end()
4227
4239
4228
4240
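The read/write neutralization above follows the extension's usual compatibility recipe: probe which attribute the running Mercurial exposes, replace it with a no-op for the duration of the benchmark, and restore it in a finally block. A minimal standalone sketch of that pattern (the object and attribute names are placeholders, not real Mercurial API):

    import contextlib

    @contextlib.contextmanager
    def replaced(obj, name, replacement):
        # swap obj.<name> for `replacement`, restoring the original on exit
        original = getattr(obj, name)
        setattr(obj, name, replacement)
        try:
            yield
        finally:
            setattr(obj, name, original)

    # e.g. silence a hypothetical cache writer while timing:
    # with replaced(cache_module, 'write', lambda *a, **k: None):
    #     run_benchmark()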
4229 @command(
4241 @command(
4230 b'perf::branchmapupdate|perfbranchmapupdate',
4242 b'perf::branchmapupdate|perfbranchmapupdate',
4231 [
4243 [
4232 (b'', b'base', [], b'subset of revision to start from'),
4244 (b'', b'base', [], b'subset of revision to start from'),
4233 (b'', b'target', [], b'subset of revision to end with'),
4245 (b'', b'target', [], b'subset of revision to end with'),
4234 (b'', b'clear-caches', False, b'clear caches between each run'),
4246 (b'', b'clear-caches', False, b'clear caches between each run'),
4235 ]
4247 ]
4236 + formatteropts,
4248 + formatteropts,
4237 )
4249 )
4238 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4250 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4239 """benchmark branchmap update from <base> revs to <target> revs
4251 """benchmark branchmap update from <base> revs to <target> revs
4240
4252
4241 If `--clear-caches` is passed, the following items will be reset before
4253 If `--clear-caches` is passed, the following items will be reset before
4242 each update:
4254 each update:
4243 * the changelog instance and associated indexes
4255 * the changelog instance and associated indexes
4244 * the rev-branch-cache instance
4256 * the rev-branch-cache instance
4245
4257
4246 Examples:
4258 Examples:
4247
4259
4248 # update for the last revision only
4260 # update for the last revision only
4249 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4261 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4250
4262
4251 # update for changes coming with a new branch
4263 # update for changes coming with a new branch
4252 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4264 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4253 """
4265 """
4254 from mercurial import branchmap
4266 from mercurial import branchmap
4255 from mercurial import repoview
4267 from mercurial import repoview
4256
4268
4257 opts = _byteskwargs(opts)
4269 opts = _byteskwargs(opts)
4258 timer, fm = gettimer(ui, opts)
4270 timer, fm = gettimer(ui, opts)
4259 clearcaches = opts[b'clear_caches']
4271 clearcaches = opts[b'clear_caches']
4260 unfi = repo.unfiltered()
4272 unfi = repo.unfiltered()
4261 x = [None] # used to pass data between closures
4273 x = [None] # used to pass data between closures
4262
4274
4263 # we use a `list` here to avoid possible side effects from smartset
4275 # we use a `list` here to avoid possible side effects from smartset
4264 baserevs = list(scmutil.revrange(repo, base))
4276 baserevs = list(scmutil.revrange(repo, base))
4265 targetrevs = list(scmutil.revrange(repo, target))
4277 targetrevs = list(scmutil.revrange(repo, target))
4266 if not baserevs:
4278 if not baserevs:
4267 raise error.Abort(b'no revisions selected for --base')
4279 raise error.Abort(b'no revisions selected for --base')
4268 if not targetrevs:
4280 if not targetrevs:
4269 raise error.Abort(b'no revisions selected for --target')
4281 raise error.Abort(b'no revisions selected for --target')
4270
4282
4271 # make sure the target branchmap also contains the one in the base
4283 # make sure the target branchmap also contains the one in the base
4272 targetrevs = list(set(baserevs) | set(targetrevs))
4284 targetrevs = list(set(baserevs) | set(targetrevs))
4273 targetrevs.sort()
4285 targetrevs.sort()
4274
4286
4275 cl = repo.changelog
4287 cl = repo.changelog
4276 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4288 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4277 allbaserevs.sort()
4289 allbaserevs.sort()
4278 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4290 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4279
4291
4280 newrevs = list(alltargetrevs.difference(allbaserevs))
4292 newrevs = list(alltargetrevs.difference(allbaserevs))
4281 newrevs.sort()
4293 newrevs.sort()
4282
4294
4283 allrevs = frozenset(unfi.changelog.revs())
4295 allrevs = frozenset(unfi.changelog.revs())
4284 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4296 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4285 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4297 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4286
4298
4287 def basefilter(repo, visibilityexceptions=None):
4299 def basefilter(repo, visibilityexceptions=None):
4288 return basefilterrevs
4300 return basefilterrevs
4289
4301
4290 def targetfilter(repo, visibilityexceptions=None):
4302 def targetfilter(repo, visibilityexceptions=None):
4291 return targetfilterrevs
4303 return targetfilterrevs
4292
4304
4293 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4305 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4294 ui.status(msg % (len(allbaserevs), len(newrevs)))
4306 ui.status(msg % (len(allbaserevs), len(newrevs)))
4295 if targetfilterrevs:
4307 if targetfilterrevs:
4296 msg = b'(%d revisions still filtered)\n'
4308 msg = b'(%d revisions still filtered)\n'
4297 ui.status(msg % len(targetfilterrevs))
4309 ui.status(msg % len(targetfilterrevs))
4298
4310
4299 try:
4311 try:
4300 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4312 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4301 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4313 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4302
4314
4303 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4315 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4304 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4316 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4305
4317
4306 bcache = repo.branchmap()
4318 bcache = repo.branchmap()
4307 copy_method = 'copy'
4319 copy_method = 'copy'
4308
4320
4309 copy_base_kwargs = {}
4321 copy_base_kwargs = {}
4310 if hasattr(bcache, 'copy'):
4322 if hasattr(bcache, 'copy'):
4311 if 'repo' in getargspec(bcache.copy).args:
4323 if 'repo' in getargspec(bcache.copy).args:
4312 copy_base_kwargs = {"repo": baserepo}
4324 copy_base_kwargs = {"repo": baserepo}
4313 copy_target_kwargs = {"repo": targetrepo}
4325 copy_target_kwargs = {"repo": targetrepo}
4314 else:
4326 else:
4315 copy_method = 'inherit_for'
4327 copy_method = 'inherit_for'
4316 copy_base_kwargs = {"repo": baserepo}
4328 copy_base_kwargs = {"repo": baserepo}
4317 copy_target_kwargs = {"repo": targetrepo}
4329 copy_target_kwargs = {"repo": targetrepo}
4318
4330
4319 # try to find an existing branchmap to reuse
4331 # try to find an existing branchmap to reuse
4320 subsettable = getbranchmapsubsettable()
4332 subsettable = getbranchmapsubsettable()
4321 candidatefilter = subsettable.get(None)
4333 candidatefilter = subsettable.get(None)
4322 while candidatefilter is not None:
4334 while candidatefilter is not None:
4323 candidatebm = repo.filtered(candidatefilter).branchmap()
4335 candidatebm = repo.filtered(candidatefilter).branchmap()
4324 if candidatebm.validfor(baserepo):
4336 if candidatebm.validfor(baserepo):
4325 filtered = repoview.filterrevs(repo, candidatefilter)
4337 filtered = repoview.filterrevs(repo, candidatefilter)
4326 missing = [r for r in allbaserevs if r in filtered]
4338 missing = [r for r in allbaserevs if r in filtered]
4327 base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
4339 base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
4328 base.update(baserepo, missing)
4340 base.update(baserepo, missing)
4329 break
4341 break
4330 candidatefilter = subsettable.get(candidatefilter)
4342 candidatefilter = subsettable.get(candidatefilter)
4331 else:
4343 else:
4332 # no suitable subset was found
4344 # no suitable subset was found
4333 base = branchmap.branchcache()
4345 base = branchmap.branchcache()
4334 base.update(baserepo, allbaserevs)
4346 base.update(baserepo, allbaserevs)
4335
4347
4336 def setup():
4348 def setup():
4337 x[0] = getattr(base, copy_method)(**copy_target_kwargs)
4349 x[0] = getattr(base, copy_method)(**copy_target_kwargs)
4338 if clearcaches:
4350 if clearcaches:
4339 unfi._revbranchcache = None
4351 unfi._revbranchcache = None
4340 clearchangelog(repo)
4352 clearchangelog(repo)
4341
4353
4342 def bench():
4354 def bench():
4343 x[0].update(targetrepo, newrevs)
4355 x[0].update(targetrepo, newrevs)
4344
4356
4345 timer(bench, setup=setup)
4357 timer(bench, setup=setup)
4346 fm.end()
4358 fm.end()
4347 finally:
4359 finally:
4348 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4360 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4349 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4361 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4350
4362
4351
4363
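One detail worth noting in the function above: it picks between the `copy` and `inherit_for` methods, and decides whether to pass a `repo` argument, by inspecting the signature of whatever the running version provides. A condensed, self-contained sketch of that probing idiom (the Cache class here is invented for illustration):

    import inspect

    class Cache:
        def copy(self, repo=None):
            return Cache()

    def duplicate(cache, repo):
        # pass `repo` only if this version's copy() accepts it
        if 'repo' in inspect.signature(cache.copy).parameters:
            return cache.copy(repo=repo)
        return cache.copy()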
4352 @command(
4364 @command(
4353 b'perf::branchmapload|perfbranchmapload',
4365 b'perf::branchmapload|perfbranchmapload',
4354 [
4366 [
4355 (b'f', b'filter', b'', b'Specify repoview filter'),
4367 (b'f', b'filter', b'', b'Specify repoview filter'),
4356 (b'', b'list', False, b'List branchmap filter caches'),
4368 (b'', b'list', False, b'List branchmap filter caches'),
4357 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4369 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4358 ]
4370 ]
4359 + formatteropts,
4371 + formatteropts,
4360 )
4372 )
4361 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4373 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4362 """benchmark reading the branchmap"""
4374 """benchmark reading the branchmap"""
4363 opts = _byteskwargs(opts)
4375 opts = _byteskwargs(opts)
4364 clearrevlogs = opts[b'clear_revlogs']
4376 clearrevlogs = opts[b'clear_revlogs']
4365
4377
4366 if list:
4378 if list:
4367 for name, kind, st in repo.cachevfs.readdir(stat=True):
4379 for name, kind, st in repo.cachevfs.readdir(stat=True):
4368 if name.startswith(b'branch2'):
4380 if name.startswith(b'branch2'):
4369 filtername = name.partition(b'-')[2] or b'unfiltered'
4381 filtername = name.partition(b'-')[2] or b'unfiltered'
4370 ui.status(
4382 ui.status(
4371 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4383 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4372 )
4384 )
4373 return
4385 return
4374 if not filter:
4386 if not filter:
4375 filter = None
4387 filter = None
4376 subsettable = getbranchmapsubsettable()
4388 subsettable = getbranchmapsubsettable()
4377 if filter is None:
4389 if filter is None:
4378 repo = repo.unfiltered()
4390 repo = repo.unfiltered()
4379 else:
4391 else:
4380 repo = repoview.repoview(repo, filter)
4392 repo = repoview.repoview(repo, filter)
4381
4393
4382 repo.branchmap() # make sure we have a relevant, up to date branchmap
4394 repo.branchmap() # make sure we have a relevant, up to date branchmap
4383
4395
4384 try:
4396 fromfile = getattr(branchmap, 'branch_cache_from_file', None)
4385 fromfile = branchmap.branchcache.fromfile
4397 if fromfile is None:
4386 except AttributeError:
4398 fromfile = getattr(branchmap.branchcache, 'fromfile', None)
4387 # older versions
4399 if fromfile is None:
4388 fromfile = branchmap.read
4400 fromfile = branchmap.read
4389
4401
4390 currentfilter = filter
4402 currentfilter = filter
4391 # try once without timer, the filter may not be cached
4403 # try once without timer, the filter may not be cached
4392 while fromfile(repo) is None:
4404 while fromfile(repo) is None:
4393 currentfilter = subsettable.get(currentfilter)
4405 currentfilter = subsettable.get(currentfilter)
4394 if currentfilter is None:
4406 if currentfilter is None:
4395 raise error.Abort(
4407 raise error.Abort(
4396 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4408 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4397 )
4409 )
4398 repo = repo.filtered(currentfilter)
4410 repo = repo.filtered(currentfilter)
4399 timer, fm = gettimer(ui, opts)
4411 timer, fm = gettimer(ui, opts)
4400
4412
4401 def setup():
4413 def setup():
4402 if clearrevlogs:
4414 if clearrevlogs:
4403 clearchangelog(repo)
4415 clearchangelog(repo)
4404
4416
4405 def bench():
4417 def bench():
4406 fromfile(repo)
4418 fromfile(repo)
4407
4419
4408 timer(bench, setup=setup)
4420 timer(bench, setup=setup)
4409 fm.end()
4421 fm.end()
4410
4422
4411
4423
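An illustrative session (sizes invented; the line format follows the ui.status call above): --list enumerates the on-disk branchmap files per filter, after which a specific one can be benchmarked:

    $ hg perf::branchmapload --list
    unfiltered - 1.20 KB
    served - 1.18 KB
    $ hg perf::branchmapload --filter served --clear-revlogs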
4412 @command(b'perf::loadmarkers|perfloadmarkers')
4424 @command(b'perf::loadmarkers|perfloadmarkers')
4413 def perfloadmarkers(ui, repo):
4425 def perfloadmarkers(ui, repo):
4414 """benchmark the time to parse the on-disk markers for a repo
4426 """benchmark the time to parse the on-disk markers for a repo
4415
4427
4416 Result is the number of markers in the repo."""
4428 Result is the number of markers in the repo."""
4417 timer, fm = gettimer(ui)
4429 timer, fm = gettimer(ui)
4418 svfs = getsvfs(repo)
4430 svfs = getsvfs(repo)
4419 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4431 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4420 fm.end()
4432 fm.end()
4421
4433
4422
4434
4423 @command(
4435 @command(
4424 b'perf::lrucachedict|perflrucachedict',
4436 b'perf::lrucachedict|perflrucachedict',
4425 formatteropts
4437 formatteropts
4426 + [
4438 + [
4427 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4439 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4428 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4440 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4429 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4441 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4430 (b'', b'size', 4, b'size of cache'),
4442 (b'', b'size', 4, b'size of cache'),
4431 (b'', b'gets', 10000, b'number of key lookups'),
4443 (b'', b'gets', 10000, b'number of key lookups'),
4432 (b'', b'sets', 10000, b'number of key sets'),
4444 (b'', b'sets', 10000, b'number of key sets'),
4433 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4445 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4434 (
4446 (
4435 b'',
4447 b'',
4436 b'mixedgetfreq',
4448 b'mixedgetfreq',
4437 50,
4449 50,
4438 b'frequency of get vs set ops in mixed mode',
4450 b'frequency of get vs set ops in mixed mode',
4439 ),
4451 ),
4440 ],
4452 ],
4441 norepo=True,
4453 norepo=True,
4442 )
4454 )
4443 def perflrucache(
4455 def perflrucache(
4444 ui,
4456 ui,
4445 mincost=0,
4457 mincost=0,
4446 maxcost=100,
4458 maxcost=100,
4447 costlimit=0,
4459 costlimit=0,
4448 size=4,
4460 size=4,
4449 gets=10000,
4461 gets=10000,
4450 sets=10000,
4462 sets=10000,
4451 mixed=10000,
4463 mixed=10000,
4452 mixedgetfreq=50,
4464 mixedgetfreq=50,
4453 **opts
4465 **opts
4454 ):
4466 ):
4455 opts = _byteskwargs(opts)
4467 opts = _byteskwargs(opts)
4456
4468
4457 def doinit():
4469 def doinit():
4458 for i in _xrange(10000):
4470 for i in _xrange(10000):
4459 util.lrucachedict(size)
4471 util.lrucachedict(size)
4460
4472
4461 costrange = list(range(mincost, maxcost + 1))
4473 costrange = list(range(mincost, maxcost + 1))
4462
4474
4463 values = []
4475 values = []
4464 for i in _xrange(size):
4476 for i in _xrange(size):
4465 values.append(random.randint(0, _maxint))
4477 values.append(random.randint(0, _maxint))
4466
4478
4467 # Get mode fills the cache and tests raw lookup performance with no
4479 # Get mode fills the cache and tests raw lookup performance with no
4468 # eviction.
4480 # eviction.
4469 getseq = []
4481 getseq = []
4470 for i in _xrange(gets):
4482 for i in _xrange(gets):
4471 getseq.append(random.choice(values))
4483 getseq.append(random.choice(values))
4472
4484
4473 def dogets():
4485 def dogets():
4474 d = util.lrucachedict(size)
4486 d = util.lrucachedict(size)
4475 for v in values:
4487 for v in values:
4476 d[v] = v
4488 d[v] = v
4477 for key in getseq:
4489 for key in getseq:
4478 value = d[key]
4490 value = d[key]
4479 value # silence pyflakes warning
4491 value # silence pyflakes warning
4480
4492
4481 def dogetscost():
4493 def dogetscost():
4482 d = util.lrucachedict(size, maxcost=costlimit)
4494 d = util.lrucachedict(size, maxcost=costlimit)
4483 for i, v in enumerate(values):
4495 for i, v in enumerate(values):
4484 d.insert(v, v, cost=costs[i])
4496 d.insert(v, v, cost=costs[i])
4485 for key in getseq:
4497 for key in getseq:
4486 try:
4498 try:
4487 value = d[key]
4499 value = d[key]
4488 value # silence pyflakes warning
4500 value # silence pyflakes warning
4489 except KeyError:
4501 except KeyError:
4490 pass
4502 pass
4491
4503
4492 # Set mode tests insertion speed with cache eviction.
4504 # Set mode tests insertion speed with cache eviction.
4493 setseq = []
4505 setseq = []
4494 costs = []
4506 costs = []
4495 for i in _xrange(sets):
4507 for i in _xrange(sets):
4496 setseq.append(random.randint(0, _maxint))
4508 setseq.append(random.randint(0, _maxint))
4497 costs.append(random.choice(costrange))
4509 costs.append(random.choice(costrange))
4498
4510
4499 def doinserts():
4511 def doinserts():
4500 d = util.lrucachedict(size)
4512 d = util.lrucachedict(size)
4501 for v in setseq:
4513 for v in setseq:
4502 d.insert(v, v)
4514 d.insert(v, v)
4503
4515
4504 def doinsertscost():
4516 def doinsertscost():
4505 d = util.lrucachedict(size, maxcost=costlimit)
4517 d = util.lrucachedict(size, maxcost=costlimit)
4506 for i, v in enumerate(setseq):
4518 for i, v in enumerate(setseq):
4507 d.insert(v, v, cost=costs[i])
4519 d.insert(v, v, cost=costs[i])
4508
4520
4509 def dosets():
4521 def dosets():
4510 d = util.lrucachedict(size)
4522 d = util.lrucachedict(size)
4511 for v in setseq:
4523 for v in setseq:
4512 d[v] = v
4524 d[v] = v
4513
4525
4514 # Mixed mode randomly performs gets and sets with eviction.
4526 # Mixed mode randomly performs gets and sets with eviction.
4515 mixedops = []
4527 mixedops = []
4516 for i in _xrange(mixed):
4528 for i in _xrange(mixed):
4517 r = random.randint(0, 100)
4529 r = random.randint(0, 100)
4518 if r < mixedgetfreq:
4530 if r < mixedgetfreq:
4519 op = 0
4531 op = 0
4520 else:
4532 else:
4521 op = 1
4533 op = 1
4522
4534
4523 mixedops.append(
4535 mixedops.append(
4524 (op, random.randint(0, size * 2), random.choice(costrange))
4536 (op, random.randint(0, size * 2), random.choice(costrange))
4525 )
4537 )
4526
4538
4527 def domixed():
4539 def domixed():
4528 d = util.lrucachedict(size)
4540 d = util.lrucachedict(size)
4529
4541
4530 for op, v, cost in mixedops:
4542 for op, v, cost in mixedops:
4531 if op == 0:
4543 if op == 0:
4532 try:
4544 try:
4533 d[v]
4545 d[v]
4534 except KeyError:
4546 except KeyError:
4535 pass
4547 pass
4536 else:
4548 else:
4537 d[v] = v
4549 d[v] = v
4538
4550
4539 def domixedcost():
4551 def domixedcost():
4540 d = util.lrucachedict(size, maxcost=costlimit)
4552 d = util.lrucachedict(size, maxcost=costlimit)
4541
4553
4542 for op, v, cost in mixedops:
4554 for op, v, cost in mixedops:
4543 if op == 0:
4555 if op == 0:
4544 try:
4556 try:
4545 d[v]
4557 d[v]
4546 except KeyError:
4558 except KeyError:
4547 pass
4559 pass
4548 else:
4560 else:
4549 d.insert(v, v, cost=cost)
4561 d.insert(v, v, cost=cost)
4550
4562
4551 benches = [
4563 benches = [
4552 (doinit, b'init'),
4564 (doinit, b'init'),
4553 ]
4565 ]
4554
4566
4555 if costlimit:
4567 if costlimit:
4556 benches.extend(
4568 benches.extend(
4557 [
4569 [
4558 (dogetscost, b'gets w/ cost limit'),
4570 (dogetscost, b'gets w/ cost limit'),
4559 (doinsertscost, b'inserts w/ cost limit'),
4571 (doinsertscost, b'inserts w/ cost limit'),
4560 (domixedcost, b'mixed w/ cost limit'),
4572 (domixedcost, b'mixed w/ cost limit'),
4561 ]
4573 ]
4562 )
4574 )
4563 else:
4575 else:
4564 benches.extend(
4576 benches.extend(
4565 [
4577 [
4566 (dogets, b'gets'),
4578 (dogets, b'gets'),
4567 (doinserts, b'inserts'),
4579 (doinserts, b'inserts'),
4568 (dosets, b'sets'),
4580 (dosets, b'sets'),
4569 (domixed, b'mixed'),
4581 (domixed, b'mixed'),
4570 ]
4582 ]
4571 )
4583 )
4572
4584
4573 for fn, title in benches:
4585 for fn, title in benches:
4574 timer, fm = gettimer(ui, opts)
4586 timer, fm = gettimer(ui, opts)
4575 timer(fn, title=title)
4587 timer(fn, title=title)
4576 fm.end()
4588 fm.end()
4577
4589
4578
4590
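For context on the container being measured: util.lrucachedict behaves like a bounded dict with least-recently-used eviction, optionally weighted by per-item cost. A short sketch of the API surface these benchmarks exercise:

    from mercurial import util

    d = util.lrucachedict(4)       # room for four entries
    d[b'a'] = 1                    # dict-style insertion
    d.insert(b'b', 2, cost=10)     # insertion with an explicit cost
    try:
        d[b'missing']              # absent keys raise KeyError
    except KeyError:
        pass

    # with a cost ceiling, eviction is driven by total cost, not entry count
    dc = util.lrucachedict(4, maxcost=100)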
4579 @command(
4591 @command(
4580 b'perf::write|perfwrite',
4592 b'perf::write|perfwrite',
4581 formatteropts
4593 formatteropts
4582 + [
4594 + [
4583 (b'', b'write-method', b'write', b'ui write method'),
4595 (b'', b'write-method', b'write', b'ui write method'),
4584 (b'', b'nlines', 100, b'number of lines'),
4596 (b'', b'nlines', 100, b'number of lines'),
4585 (b'', b'nitems', 100, b'number of items (per line)'),
4597 (b'', b'nitems', 100, b'number of items (per line)'),
4586 (b'', b'item', b'x', b'item that is written'),
4598 (b'', b'item', b'x', b'item that is written'),
4587 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4599 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4588 (b'', b'flush-line', None, b'flush after each line'),
4600 (b'', b'flush-line', None, b'flush after each line'),
4589 ],
4601 ],
4590 )
4602 )
4591 def perfwrite(ui, repo, **opts):
4603 def perfwrite(ui, repo, **opts):
4592 """microbenchmark ui.write (and others)"""
4604 """microbenchmark ui.write (and others)"""
4593 opts = _byteskwargs(opts)
4605 opts = _byteskwargs(opts)
4594
4606
4595 write = getattr(ui, _sysstr(opts[b'write_method']))
4607 write = getattr(ui, _sysstr(opts[b'write_method']))
4596 nlines = int(opts[b'nlines'])
4608 nlines = int(opts[b'nlines'])
4597 nitems = int(opts[b'nitems'])
4609 nitems = int(opts[b'nitems'])
4598 item = opts[b'item']
4610 item = opts[b'item']
4599 batch_line = opts.get(b'batch_line')
4611 batch_line = opts.get(b'batch_line')
4600 flush_line = opts.get(b'flush_line')
4612 flush_line = opts.get(b'flush_line')
4601
4613
4602 if batch_line:
4614 if batch_line:
4603 line = item * nitems + b'\n'
4615 line = item * nitems + b'\n'
4604
4616
4605 def benchmark():
4617 def benchmark():
4606 for i in pycompat.xrange(nlines):
4618 for i in pycompat.xrange(nlines):
4607 if batch_line:
4619 if batch_line:
4608 write(line)
4620 write(line)
4609 else:
4621 else:
4610 for i in pycompat.xrange(nitems):
4622 for i in pycompat.xrange(nitems):
4611 write(item)
4623 write(item)
4612 write(b'\n')
4624 write(b'\n')
4613 if flush_line:
4625 if flush_line:
4614 ui.flush()
4626 ui.flush()
4615 ui.flush()
4627 ui.flush()
4616
4628
4617 timer, fm = gettimer(ui, opts)
4629 timer, fm = gettimer(ui, opts)
4618 timer(benchmark)
4630 timer(benchmark)
4619 fm.end()
4631 fm.end()
4620
4632
4621
4633
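To isolate the per-call overhead of the chosen write method, compare a run that emits one item at a time against one that batches a whole line; both runs emit exactly the same bytes:

    $ hg perf::write --nlines 100 --nitems 100
    $ hg perf::write --nlines 100 --nitems 100 --batch-line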
4622 def uisetup(ui):
4634 def uisetup(ui):
4623 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4635 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4624 commands, b'debugrevlogopts'
4636 commands, b'debugrevlogopts'
4625 ):
4637 ):
4626 # for "historical portability":
4638 # for "historical portability":
4627 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4639 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4628 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4640 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4629 # openrevlog() should cause failure, because it has been
4641 # openrevlog() should cause failure, because it has been
4630 # available since 3.5 (or 49c583ca48c4).
4642 # available since 3.5 (or 49c583ca48c4).
4631 def openrevlog(orig, repo, cmd, file_, opts):
4643 def openrevlog(orig, repo, cmd, file_, opts):
4632 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4644 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4633 raise error.Abort(
4645 raise error.Abort(
4634 b"This version doesn't support --dir option",
4646 b"This version doesn't support --dir option",
4635 hint=b"use 3.5 or later",
4647 hint=b"use 3.5 or later",
4636 )
4648 )
4637 return orig(repo, cmd, file_, opts)
4649 return orig(repo, cmd, file_, opts)
4638
4650
4639 name = _sysstr(b'openrevlog')
4651 name = _sysstr(b'openrevlog')
4640 extensions.wrapfunction(cmdutil, name, openrevlog)
4652 extensions.wrapfunction(cmdutil, name, openrevlog)
4641
4653
4642
4654
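extensions.wrapfunction, used just above, replaces a module attribute with a wrapper that receives the original callable as its first argument. A minimal Mercurial-free equivalent for readers unfamiliar with it (simplified; the real helper does more bookkeeping):

    def wrapfunction(container, name, wrapper):
        # rebind container.<name> so `wrapper` runs with the original prepended
        orig = getattr(container, name)

        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)

        setattr(container, name, wrapped)
        return orig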
4643 @command(
4655 @command(
4644 b'perf::progress|perfprogress',
4656 b'perf::progress|perfprogress',
4645 formatteropts
4657 formatteropts
4646 + [
4658 + [
4647 (b'', b'topic', b'topic', b'topic for progress messages'),
4659 (b'', b'topic', b'topic', b'topic for progress messages'),
4648 (b'c', b'total', 1000000, b'total value we are progressing to'),
4660 (b'c', b'total', 1000000, b'total value we are progressing to'),
4649 ],
4661 ],
4650 norepo=True,
4662 norepo=True,
4651 )
4663 )
4652 def perfprogress(ui, topic=None, total=None, **opts):
4664 def perfprogress(ui, topic=None, total=None, **opts):
4653 """printing of progress bars"""
4665 """printing of progress bars"""
4654 opts = _byteskwargs(opts)
4666 opts = _byteskwargs(opts)
4655
4667
4656 timer, fm = gettimer(ui, opts)
4668 timer, fm = gettimer(ui, opts)
4657
4669
4658 def doprogress():
4670 def doprogress():
4659 with ui.makeprogress(topic, total=total) as progress:
4671 with ui.makeprogress(topic, total=total) as progress:
4660 for i in _xrange(total):
4672 for i in _xrange(total):
4661 progress.increment()
4673 progress.increment()
4662
4674
4663 timer(doprogress)
4675 timer(doprogress)
4664 fm.end()
4676 fm.end()
@@ -1,1079 +1,1097 b''
1 # branchmap.py - logic to compute, maintain and store branchmaps for local repos
1 # branchmap.py - logic to compute, maintain and store branchmaps for local repos
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import struct
9 import struct
10
10
11 from .node import (
11 from .node import (
12 bin,
12 bin,
13 hex,
13 hex,
14 nullrev,
14 nullrev,
15 )
15 )
16
16
17 from typing import (
17 from typing import (
18 Any,
18 Any,
19 Callable,
19 Callable,
20 Dict,
20 Dict,
21 Iterable,
21 Iterable,
22 List,
22 List,
23 Optional,
23 Optional,
24 Set,
24 Set,
25 TYPE_CHECKING,
25 TYPE_CHECKING,
26 Tuple,
26 Tuple,
27 Union,
27 Union,
28 )
28 )
29
29
30 from . import (
30 from . import (
31 encoding,
31 encoding,
32 error,
32 error,
33 obsolete,
33 obsolete,
34 scmutil,
34 scmutil,
35 util,
35 util,
36 )
36 )
37
37
38 from .utils import (
38 from .utils import (
39 repoviewutil,
39 repoviewutil,
40 stringutil,
40 stringutil,
41 )
41 )
42
42
43 if TYPE_CHECKING:
43 if TYPE_CHECKING:
44 from . import localrepo
44 from . import localrepo
45
45
46 assert [localrepo]
46 assert [localrepo]
47
47
48 subsettable = repoviewutil.subsettable
48 subsettable = repoviewutil.subsettable
49
49
50 calcsize = struct.calcsize
50 calcsize = struct.calcsize
51 pack_into = struct.pack_into
51 pack_into = struct.pack_into
52 unpack_from = struct.unpack_from
52 unpack_from = struct.unpack_from
53
53
54
54
55 class BranchMapCache:
55 class BranchMapCache:
56 """mapping of filtered views of repo with their branchcache"""
56 """mapping of filtered views of repo with their branchcache"""
57
57
58 def __init__(self):
58 def __init__(self):
59 self._per_filter = {}
59 self._per_filter = {}
60
60
61 def __getitem__(self, repo):
61 def __getitem__(self, repo):
62 self.updatecache(repo)
62 self.updatecache(repo)
63 bcache = self._per_filter[repo.filtername]
63 bcache = self._per_filter[repo.filtername]
64 assert bcache._filtername == repo.filtername, (
64 assert bcache._filtername == repo.filtername, (
65 bcache._filtername,
65 bcache._filtername,
66 repo.filtername,
66 repo.filtername,
67 )
67 )
68 return bcache
68 return bcache
69
69
70 def update_disk(self, repo):
70 def update_disk(self, repo):
71 """ensure an up-to-date cache is (or will be) written on disk
71 """ensure an up-to-date cache is (or will be) written on disk
72
72
73 The cache for this repository view is updated if needed and written on
73 The cache for this repository view is updated if needed and written on
74 disk.
74 disk.
75
75
76 If a transaction is in progress, the write is scheduled to happen at
76 If a transaction is in progress, the write is scheduled to happen at
77 transaction close. See the `BranchMapCache.write_dirty` method.
77 transaction close. See the `BranchMapCache.write_dirty` method.
78
78
79 This method exists independently of __getitem__ as it is sometimes useful
79 This method exists independently of __getitem__ as it is sometimes useful
80 to signal that we do not intend to use the data in memory yet.
80 to signal that we do not intend to use the data in memory yet.
81 """
81 """
82 self.updatecache(repo)
82 self.updatecache(repo)
83 bcache = self._per_filter[repo.filtername]
83 bcache = self._per_filter[repo.filtername]
84 assert bcache._filtername == repo.filtername, (
84 assert bcache._filtername == repo.filtername, (
85 bcache._filtername,
85 bcache._filtername,
86 repo.filtername,
86 repo.filtername,
87 )
87 )
88 tr = repo.currenttransaction()
88 tr = repo.currenttransaction()
89 if getattr(tr, 'finalized', True):
89 if getattr(tr, 'finalized', True):
90 bcache.sync_disk(repo)
90 bcache.sync_disk(repo)
91
91
92 def updatecache(self, repo):
92 def updatecache(self, repo):
93 """Update the cache for the given filtered view on a repository"""
93 """Update the cache for the given filtered view on a repository"""
94 # This can trigger updates for the caches for subsets of the filtered
94 # This can trigger updates for the caches for subsets of the filtered
95 # view, e.g. when there is no cache for this filtered view or the cache
95 # view, e.g. when there is no cache for this filtered view or the cache
96 # is stale.
96 # is stale.
97
97
98 cl = repo.changelog
98 cl = repo.changelog
99 filtername = repo.filtername
99 filtername = repo.filtername
100 bcache = self._per_filter.get(filtername)
100 bcache = self._per_filter.get(filtername)
101 if bcache is None or not bcache.validfor(repo):
101 if bcache is None or not bcache.validfor(repo):
102 # cache object missing or cache object stale? Read from disk
102 # cache object missing or cache object stale? Read from disk
103 bcache = branchcache.fromfile(repo)
103 bcache = branch_cache_from_file(repo)
104
104
105 revs = []
105 revs = []
106 if bcache is None:
106 if bcache is None:
107 # no (fresh) cache available anymore, perhaps we can re-use
107 # no (fresh) cache available anymore, perhaps we can re-use
108 # the cache for a subset, then extend that to add info on missing
108 # the cache for a subset, then extend that to add info on missing
109 # revisions.
109 # revisions.
110 subsetname = subsettable.get(filtername)
110 subsetname = subsettable.get(filtername)
111 if subsetname is not None:
111 if subsetname is not None:
112 subset = repo.filtered(subsetname)
112 subset = repo.filtered(subsetname)
113 self.updatecache(subset)
113 self.updatecache(subset)
114 bcache = self._per_filter[subset.filtername].inherit_for(repo)
114 bcache = self._per_filter[subset.filtername].inherit_for(repo)
115 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
115 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
116 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
116 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
117 else:
117 else:
118 # nothing to fall back on, start empty.
118 # nothing to fall back on, start empty.
119 bcache = branchcache(repo)
119 bcache = new_branch_cache(repo)
120
120
121 revs.extend(cl.revs(start=bcache.tiprev + 1))
121 revs.extend(cl.revs(start=bcache.tiprev + 1))
122 if revs:
122 if revs:
123 bcache.update(repo, revs)
123 bcache.update(repo, revs)
124
124
125 assert bcache.validfor(repo), filtername
125 assert bcache.validfor(repo), filtername
126 self._per_filter[repo.filtername] = bcache
126 self._per_filter[repo.filtername] = bcache
127
127
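The fallback in updatecache above walks subsettable, which maps each filter name to a related view whose cache can seed this one. A condensed sketch of that walk (standalone; names are illustrative, and the real method recurses so the seed cache itself gets refreshed first):

    def nearest_cached(filtername, subsettable, caches):
        # follow the subset chain until a view with a usable cache is found
        while filtername is not None:
            if filtername in caches:
                return caches[filtername]
            filtername = subsettable.get(filtername)
        return None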
128 def replace(self, repo, remotebranchmap):
128 def replace(self, repo, remotebranchmap):
129 """Replace the branchmap cache for a repo with a branch mapping.
129 """Replace the branchmap cache for a repo with a branch mapping.
130
130
131 This is likely only called during clone with a branch map from a
131 This is likely only called during clone with a branch map from a
132 remote.
132 remote.
133
133
134 """
134 """
135 cl = repo.changelog
135 cl = repo.changelog
136 clrev = cl.rev
136 clrev = cl.rev
137 clbranchinfo = cl.branchinfo
137 clbranchinfo = cl.branchinfo
138 rbheads = []
138 rbheads = []
139 closed = set()
139 closed = set()
140 for bheads in remotebranchmap.values():
140 for bheads in remotebranchmap.values():
141 rbheads += bheads
141 rbheads += bheads
142 for h in bheads:
142 for h in bheads:
143 r = clrev(h)
143 r = clrev(h)
144 b, c = clbranchinfo(r)
144 b, c = clbranchinfo(r)
145 if c:
145 if c:
146 closed.add(h)
146 closed.add(h)
147
147
148 if rbheads:
148 if rbheads:
149 rtiprev = max((int(clrev(node)) for node in rbheads))
149 rtiprev = max((int(clrev(node)) for node in rbheads))
150 cache = branchcache(
150 cache = new_branch_cache(
151 repo,
151 repo,
152 remotebranchmap,
152 remotebranchmap,
153 repo[rtiprev].node(),
153 repo[rtiprev].node(),
154 rtiprev,
154 rtiprev,
155 closednodes=closed,
155 closednodes=closed,
156 )
156 )
157
157
158 # Try to stick it as low as possible
158 # Try to stick it as low as possible
159 # filters above 'served' are unlikely to be fetched from a clone
159 # filters above 'served' are unlikely to be fetched from a clone
160 for candidate in (b'base', b'immutable', b'served'):
160 for candidate in (b'base', b'immutable', b'served'):
161 rview = repo.filtered(candidate)
161 rview = repo.filtered(candidate)
162 if cache.validfor(rview):
162 if cache.validfor(rview):
163 cache._filtername = candidate
163 cache._filtername = candidate
164 self._per_filter[candidate] = cache
164 self._per_filter[candidate] = cache
165 cache._state = STATE_DIRTY
165 cache._state = STATE_DIRTY
166 cache.write(rview)
166 cache.write(rview)
167 return
167 return
168
168
169 def clear(self):
169 def clear(self):
170 self._per_filter.clear()
170 self._per_filter.clear()
171
171
172 def write_dirty(self, repo):
172 def write_dirty(self, repo):
173 unfi = repo.unfiltered()
173 unfi = repo.unfiltered()
174 for filtername in repoviewutil.get_ordered_subset():
174 for filtername in repoviewutil.get_ordered_subset():
175 cache = self._per_filter.get(filtername)
175 cache = self._per_filter.get(filtername)
176 if cache is None:
176 if cache is None:
177 continue
177 continue
178 if filtername is None:
178 if filtername is None:
179 repo = unfi
179 repo = unfi
180 else:
180 else:
181 repo = unfi.filtered(filtername)
181 repo = unfi.filtered(filtername)
182 cache.sync_disk(repo)
182 cache.sync_disk(repo)
183
183
184
184
185 def _unknownnode(node):
185 def _unknownnode(node):
186 """raises ValueError when branchcache finds a node which does not exist"""
186 """raises ValueError when branchcache finds a node which does not exist"""
187 raise ValueError('node %s does not exist' % node.hex())
187 raise ValueError('node %s does not exist' % node.hex())
188
188
189
189
190 def _branchcachedesc(repo):
190 def _branchcachedesc(repo):
191 if repo.filtername is not None:
191 if repo.filtername is not None:
192 return b'branch cache (%s)' % repo.filtername
192 return b'branch cache (%s)' % repo.filtername
193 else:
193 else:
194 return b'branch cache'
194 return b'branch cache'
195
195
196
196
197 class _BaseBranchCache:
197 class _BaseBranchCache:
198 """A dict-like object that holds a cache of branch heads.
198 """A dict-like object that holds a cache of branch heads.
199
199
200 This cache is used to avoid costly computations to determine all the
200 This cache is used to avoid costly computations to determine all the
201 branch heads of a repo.
201 branch heads of a repo.
202
203 The cache is serialized on disk in the following format:
204
205 <tip hex node> <tip rev number> [optional filtered repo hex hash]
206 <branch head hex node> <open/closed state> <branch name>
207 <branch head hex node> <open/closed state> <branch name>
208 ...
209
210 The first line is used to check if the cache is still valid. If the
211 branch cache is for a filtered repo view, an optional third hash is
212 included that hashes the hashes of all filtered and obsolete revisions.
213
214 The open/closed state is represented by a single letter 'o' or 'c'.
215 This field can be used to avoid changelog reads when determining if a
216 branch head closes a branch or not.
217 """
202 """
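Concretely, a cache file following the format description removed above would look like this (all hashes invented for illustration):

    dd52a3377c5cdd4c8a8a8b0c9d1e2f3a4b5c6d7e 2543 1a2b3c4d5e6f708192a3b4c5d6e7f8091a2b3c4d
    e38bab8086a847d611bdb0b12f0ce9d4ff7e1ab5 o default
    9fe1b3a7c9c186a7e3bfa6b5ba0c3e1d7a4b5c6d c stable

The first line carries the tip node, the tip revision and the optional filtered-revisions hash; each following line is one branch head with its open ('o') or closed ('c') state.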
218
203
219 def __init__(
204 def __init__(
220 self,
205 self,
221 repo: "localrepo.localrepository",
206 repo: "localrepo.localrepository",
222 entries: Union[
207 entries: Union[
223 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
208 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
224 ] = (),
209 ] = (),
225 closed_nodes: Optional[Set[bytes]] = None,
210 closed_nodes: Optional[Set[bytes]] = None,
226 ) -> None:
211 ) -> None:
227 """hasnode is a function which can be used to verify whether changelog
212 """hasnode is a function which can be used to verify whether changelog
228 has a given node or not. If it's not provided, we assume that every node
213 has a given node or not. If it's not provided, we assume that every node
229 we have exists in changelog"""
214 we have exists in changelog"""
230 # closednodes is a set of nodes that close their branch. If the branch
215 # closednodes is a set of nodes that close their branch. If the branch
231 # cache has been updated, it may contain nodes that are no longer
216 # cache has been updated, it may contain nodes that are no longer
232 # heads.
217 # heads.
233 if closed_nodes is None:
218 if closed_nodes is None:
234 closed_nodes = set()
219 closed_nodes = set()
235 self._closednodes = set(closed_nodes)
220 self._closednodes = set(closed_nodes)
236 self._entries = dict(entries)
221 self._entries = dict(entries)
237
222
238 def __iter__(self):
223 def __iter__(self):
239 return iter(self._entries)
224 return iter(self._entries)
240
225
241 def __setitem__(self, key, value):
226 def __setitem__(self, key, value):
242 self._entries[key] = value
227 self._entries[key] = value
243
228
244 def __getitem__(self, key):
229 def __getitem__(self, key):
245 return self._entries[key]
230 return self._entries[key]
246
231
247 def __contains__(self, key):
232 def __contains__(self, key):
248 return key in self._entries
233 return key in self._entries
249
234
250 def iteritems(self):
235 def iteritems(self):
251 return self._entries.items()
236 return self._entries.items()
252
237
253 items = iteritems
238 items = iteritems
254
239
255 def hasbranch(self, label):
240 def hasbranch(self, label):
256 """checks whether a branch of this name exists or not"""
241 """checks whether a branch of this name exists or not"""
257 return label in self._entries
242 return label in self._entries
258
243
259 def _branchtip(self, heads):
244 def _branchtip(self, heads):
260 """Return tuple with last open head in heads and false,
245 """Return tuple with last open head in heads and false,
261 otherwise return last closed head and true."""
246 otherwise return last closed head and true."""
262 tip = heads[-1]
247 tip = heads[-1]
263 closed = True
248 closed = True
264 for h in reversed(heads):
249 for h in reversed(heads):
265 if h not in self._closednodes:
250 if h not in self._closednodes:
266 tip = h
251 tip = h
267 closed = False
252 closed = False
268 break
253 break
269 return tip, closed
254 return tip, closed
270
255
271 def branchtip(self, branch):
256 def branchtip(self, branch):
272 """Return the tipmost open head on branch head, otherwise return the
257 """Return the tipmost open head on branch head, otherwise return the
273 tipmost closed head on branch.
258 tipmost closed head on branch.
274 Raise KeyError for unknown branch."""
259 Raise KeyError for unknown branch."""
275 return self._branchtip(self[branch])[0]
260 return self._branchtip(self[branch])[0]
276
261
277 def iteropen(self, nodes):
262 def iteropen(self, nodes):
278 return (n for n in nodes if n not in self._closednodes)
263 return (n for n in nodes if n not in self._closednodes)
279
264
280 def branchheads(self, branch, closed=False):
265 def branchheads(self, branch, closed=False):
281 heads = self._entries[branch]
266 heads = self._entries[branch]
282 if not closed:
267 if not closed:
283 heads = list(self.iteropen(heads))
268 heads = list(self.iteropen(heads))
284 return heads
269 return heads
285
270
286 def iterbranches(self):
271 def iterbranches(self):
287 for bn, heads in self.items():
272 for bn, heads in self.items():
288 yield (bn, heads) + self._branchtip(heads)
273 yield (bn, heads) + self._branchtip(heads)
289
274
290 def iterheads(self):
275 def iterheads(self):
291 """returns all the heads"""
276 """returns all the heads"""
292 return self._entries.values()
277 return self._entries.values()
293
278
294 def update(self, repo, revgen):
279 def update(self, repo, revgen):
295 """Given a branchhead cache, self, that may have extra nodes or be
280 """Given a branchhead cache, self, that may have extra nodes or be
296 missing heads, and a generator of nodes that is strictly a superset of
281 missing heads, and a generator of nodes that is strictly a superset of
297 the missing heads, this function updates self to be correct.
282 the missing heads, this function updates self to be correct.
298 """
283 """
299 starttime = util.timer()
284 starttime = util.timer()
300 cl = repo.changelog
285 cl = repo.changelog
301 # collect new branch entries
286 # collect new branch entries
302 newbranches = {}
287 newbranches = {}
303 getbranchinfo = repo.revbranchcache().branchinfo
288 getbranchinfo = repo.revbranchcache().branchinfo
304 max_rev = -1
289 max_rev = -1
305 for r in revgen:
290 for r in revgen:
306 branch, closesbranch = getbranchinfo(r)
291 branch, closesbranch = getbranchinfo(r)
307 newbranches.setdefault(branch, []).append(r)
292 newbranches.setdefault(branch, []).append(r)
308 if closesbranch:
293 if closesbranch:
309 self._closednodes.add(cl.node(r))
294 self._closednodes.add(cl.node(r))
310 max_rev = max(max_rev, r)
295 max_rev = max(max_rev, r)
311 if max_rev < 0:
296 if max_rev < 0:
312 msg = "running branchcache.update without revision to update"
297 msg = "running branchcache.update without revision to update"
313 raise error.ProgrammingError(msg)
298 raise error.ProgrammingError(msg)
314
299
315 # Delay fetching the topological heads until they are needed.
300 # Delay fetching the topological heads until they are needed.
316 # A repository without non-continuous branches can skip this part.
301 # A repository without non-continuous branches can skip this part.
317 topoheads = None
302 topoheads = None
318
303
319 # If a changeset is visible, its parents must be visible too, so
304 # If a changeset is visible, its parents must be visible too, so
320 # use the faster unfiltered parent accessor.
305 # use the faster unfiltered parent accessor.
321 parentrevs = repo.unfiltered().changelog.parentrevs
306 parentrevs = repo.unfiltered().changelog.parentrevs
322
307
323 # Faster than using ctx.obsolete()
308 # Faster than using ctx.obsolete()
324 obsrevs = obsolete.getrevs(repo, b'obsolete')
309 obsrevs = obsolete.getrevs(repo, b'obsolete')
325
310
326 for branch, newheadrevs in newbranches.items():
311 for branch, newheadrevs in newbranches.items():
327 # For every branch, compute the new branchheads.
312 # For every branch, compute the new branchheads.
328 # A branchhead is a revision such that no descendant is on
313 # A branchhead is a revision such that no descendant is on
329 # the same branch.
314 # the same branch.
330 #
315 #
331 # The branchheads are computed iteratively in revision order.
316 # The branchheads are computed iteratively in revision order.
332 # This ensures topological order, i.e. parents are processed
317 # This ensures topological order, i.e. parents are processed
333 # before their children. Ancestors are inclusive here, i.e.
318 # before their children. Ancestors are inclusive here, i.e.
334 # any revision is an ancestor of itself.
319 # any revision is an ancestor of itself.
335 #
320 #
336 # Core observations:
321 # Core observations:
337 # - The current revision is always a branchhead for the
322 # - The current revision is always a branchhead for the
338 # repository up to that point.
323 # repository up to that point.
339 # - It is the first revision of the branch if and only if
324 # - It is the first revision of the branch if and only if
340 # there was no branchhead before. In that case, it is the
325 # there was no branchhead before. In that case, it is the
341 # only branchhead as there are no possible ancestors on
326 # only branchhead as there are no possible ancestors on
342 # the same branch.
327 # the same branch.
343 # - If a parent is on the same branch, a branchhead can
328 # - If a parent is on the same branch, a branchhead can
344 # only be an ancestor of that parent, if it is parent
329 # only be an ancestor of that parent, if it is parent
345 # itself. Otherwise it would have been removed as ancestor
330 # itself. Otherwise it would have been removed as ancestor
346 # of that parent before.
331 # of that parent before.
347 # - Therefore, if all parents are on the same branch, they
332 # - Therefore, if all parents are on the same branch, they
348 # can just be removed from the branchhead set.
333 # can just be removed from the branchhead set.
349 # - If one parent is on the same branch and the other is not
334 # - If one parent is on the same branch and the other is not
350 # and there was exactly one branchhead known, the existing
335 # and there was exactly one branchhead known, the existing
351 # branchhead can only be an ancestor if it is the parent.
336 # branchhead can only be an ancestor if it is the parent.
352 # Otherwise it would have been removed as ancestor of
337 # Otherwise it would have been removed as ancestor of
353 # the parent before. The other parent therefore can't have
338 # the parent before. The other parent therefore can't have
354 # a branchhead as ancestor.
339 # a branchhead as ancestor.
355 # - In all other cases, the parents on different branches
340 # - In all other cases, the parents on different branches
356 # could have a branchhead as ancestor. Those parents are
341 # could have a branchhead as ancestor. Those parents are
357 # kept in the "uncertain" set. If all branchheads are also
342 # kept in the "uncertain" set. If all branchheads are also
358 # topological heads, they can't have descendants and further
343 # topological heads, they can't have descendants and further
359 # checks can be skipped. Otherwise, the ancestors of the
344 # checks can be skipped. Otherwise, the ancestors of the
360 # "uncertain" set are removed from branchheads.
345 # "uncertain" set are removed from branchheads.
361 # This computation is heavy and avoided if at all possible.
346 # This computation is heavy and avoided if at all possible.
362 bheads = self._entries.get(branch, [])
347 bheads = self._entries.get(branch, [])
363 bheadset = {cl.rev(node) for node in bheads}
348 bheadset = {cl.rev(node) for node in bheads}
364 uncertain = set()
349 uncertain = set()
365 for newrev in sorted(newheadrevs):
350 for newrev in sorted(newheadrevs):
366 if newrev in obsrevs:
351 if newrev in obsrevs:
367 # We ignore obsolete changesets as they shouldn't be
352 # We ignore obsolete changesets as they shouldn't be
368 # considered heads.
353 # considered heads.
369 continue
354 continue
370
355
371 if not bheadset:
356 if not bheadset:
372 bheadset.add(newrev)
357 bheadset.add(newrev)
373 continue
358 continue
374
359
375 parents = [p for p in parentrevs(newrev) if p != nullrev]
360 parents = [p for p in parentrevs(newrev) if p != nullrev]
376 samebranch = set()
361 samebranch = set()
377 otherbranch = set()
362 otherbranch = set()
378 obsparents = set()
363 obsparents = set()
379 for p in parents:
364 for p in parents:
380 if p in obsrevs:
365 if p in obsrevs:
381 # We ignored this obsolete changeset earlier, but now
366 # We ignored this obsolete changeset earlier, but now
382 # that it has non-ignored children, we need to make
367 # that it has non-ignored children, we need to make
383 # sure their ancestors are not considered heads. To
368 # sure their ancestors are not considered heads. To
384 # achieve that, we will simply treat this obsolete
369 # achieve that, we will simply treat this obsolete
385 # changeset as a parent from other branch.
370 # changeset as a parent from other branch.
386 obsparents.add(p)
371 obsparents.add(p)
387 elif p in bheadset or getbranchinfo(p)[0] == branch:
372 elif p in bheadset or getbranchinfo(p)[0] == branch:
388 samebranch.add(p)
373 samebranch.add(p)
389 else:
374 else:
390 otherbranch.add(p)
375 otherbranch.add(p)
391 if not (len(bheadset) == len(samebranch) == 1):
376 if not (len(bheadset) == len(samebranch) == 1):
392 uncertain.update(otherbranch)
377 uncertain.update(otherbranch)
393 uncertain.update(obsparents)
378 uncertain.update(obsparents)
394 bheadset.difference_update(samebranch)
379 bheadset.difference_update(samebranch)
395 bheadset.add(newrev)
380 bheadset.add(newrev)
396
381
397 if uncertain:
382 if uncertain:
398 if topoheads is None:
383 if topoheads is None:
399 topoheads = set(cl.headrevs())
384 topoheads = set(cl.headrevs())
400 if bheadset - topoheads:
385 if bheadset - topoheads:
401 floorrev = min(bheadset)
386 floorrev = min(bheadset)
402 if floorrev <= max(uncertain):
387 if floorrev <= max(uncertain):
403 ancestors = set(cl.ancestors(uncertain, floorrev))
388 ancestors = set(cl.ancestors(uncertain, floorrev))
404 bheadset -= ancestors
389 bheadset -= ancestors
405 if bheadset:
390 if bheadset:
406 self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
391 self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
407
392
408 duration = util.timer() - starttime
393 duration = util.timer() - starttime
409 repo.ui.log(
394 repo.ui.log(
410 b'branchcache',
395 b'branchcache',
411 b'updated %s in %.4f seconds\n',
396 b'updated %s in %.4f seconds\n',
412 _branchcachedesc(repo),
397 _branchcachedesc(repo),
413 duration,
398 duration,
414 )
399 )
415 return max_rev
400 return max_rev
416
401
417
402
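The pruning rules spelled out in the long comment inside update() above boil down to a little set arithmetic per new revision. The following sketch restates them on plain integers; prune_heads, branch_of and ancestors are illustrative stand-ins for the changelog queries used by update(), not Mercurial APIs:

    def prune_heads(bheads, newrev, parents, branch, branch_of, ancestors):
        """Sketch of the per-revision head pruning described above."""
        if not bheads:
            return {newrev}
        samebranch = {p for p in parents
                      if p in bheads or branch_of(p) == branch}
        otherbranch = set(parents) - samebranch
        uncertain = set()
        if not (len(bheads) == len(samebranch) == 1):
            # a parent on another branch may still descend from a known head
            uncertain |= otherbranch
        bheads -= samebranch   # same-branch parents can never remain heads
        bheads.add(newrev)
        if uncertain:
            # the expensive fallback; the real code first consults the
            # topological heads and a floor revision to skip this walk
            bheads -= ancestors(uncertain)
        return bheads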
418 STATE_CLEAN = 1
403 STATE_CLEAN = 1
419 STATE_INHERITED = 2
404 STATE_INHERITED = 2
420 STATE_DIRTY = 3
405 STATE_DIRTY = 3
421
406
422
407
423 class branchcache(_BaseBranchCache):
408 class _LocalBranchCache(_BaseBranchCache):
424 """Branchmap info for a local repo or repoview"""
409 """base class of branch-map info for a local repo or repoview"""
425
410
426 _base_filename = b"branch2"
411 _base_filename = None
427
412
428 def __init__(
413 def __init__(
429 self,
414 self,
430 repo: "localrepo.localrepository",
415 repo: "localrepo.localrepository",
431 entries: Union[
416 entries: Union[
432 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
417 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
433 ] = (),
418 ] = (),
434 tipnode: Optional[bytes] = None,
419 tipnode: Optional[bytes] = None,
435 tiprev: Optional[int] = nullrev,
420 tiprev: Optional[int] = nullrev,
436 filteredhash: Optional[bytes] = None,
421 filteredhash: Optional[bytes] = None,
437 closednodes: Optional[Set[bytes]] = None,
422 closednodes: Optional[Set[bytes]] = None,
438 hasnode: Optional[Callable[[bytes], bool]] = None,
423 hasnode: Optional[Callable[[bytes], bool]] = None,
439 verify_node: bool = False,
424 verify_node: bool = False,
440 inherited: bool = False,
425 inherited: bool = False,
441 ) -> None:
426 ) -> None:
442 """hasnode is a function which can be used to verify whether changelog
427 """hasnode is a function which can be used to verify whether changelog
443 has a given node or not. If it's not provided, we assume that every node
428 has a given node or not. If it's not provided, we assume that every node
444 we have exists in changelog"""
429 we have exists in changelog"""
445 self._filtername = repo.filtername
430 self._filtername = repo.filtername
446 if tipnode is None:
431 if tipnode is None:
447 self.tipnode = repo.nullid
432 self.tipnode = repo.nullid
448 else:
433 else:
449 self.tipnode = tipnode
434 self.tipnode = tipnode
450 self.tiprev = tiprev
435 self.tiprev = tiprev
451 self.filteredhash = filteredhash
436 self.filteredhash = filteredhash
452 self._state = STATE_CLEAN
437 self._state = STATE_CLEAN
453 if inherited:
438 if inherited:
454 self._state = STATE_INHERITED
439 self._state = STATE_INHERITED
455
440
456 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
441 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
457 # closednodes is a set of nodes that close their branch. If the branch
442 # closednodes is a set of nodes that close their branch. If the branch
458 # cache has been updated, it may contain nodes that are no longer
443 # cache has been updated, it may contain nodes that are no longer
459 # heads.
444 # heads.
460
445
461 # Do we need to verify branches at all?
446 # Do we need to verify branches at all?
462 self._verify_node = verify_node
447 self._verify_node = verify_node
463 # branches for which nodes are verified
448 # branches for which nodes are verified
464 self._verifiedbranches = set()
449 self._verifiedbranches = set()
465 self._hasnode = None
450 self._hasnode = None
466 if self._verify_node:
451 if self._verify_node:
467 self._hasnode = repo.changelog.hasnode
452 self._hasnode = repo.changelog.hasnode
468
453
469 def validfor(self, repo):
454 def validfor(self, repo):
470 """check that cache contents are valid for (a subset of) this repo
455 """check that cache contents are valid for (a subset of) this repo
471
456
472 - False when the order of changesets changed or if we detect a strip.
457 - False when the order of changesets changed or if we detect a strip.
473 - True when cache is up-to-date for the current repo or its subset."""
458 - True when cache is up-to-date for the current repo or its subset."""
474 try:
459 try:
475 node = repo.changelog.node(self.tiprev)
460 node = repo.changelog.node(self.tiprev)
476 except IndexError:
461 except IndexError:
477 # changesets were stripped and now we don't even have enough to
462 # changesets were stripped and now we don't even have enough to
478 # find tiprev
463 # find tiprev
479 return False
464 return False
480 if self.tipnode != node:
465 if self.tipnode != node:
481 # tiprev doesn't correspond to tipnode: repo was stripped, or this
466 # tiprev doesn't correspond to tipnode: repo was stripped, or this
482 # repo has a different order of changesets
467 # repo has a different order of changesets
483 return False
468 return False
484 tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
469 tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
485 # hashes don't match if this repo view has a different set of filtered
470 # hashes don't match if this repo view has a different set of filtered
486 # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
471 # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
487 # history was rewritten)
472 # history was rewritten)
488 return self.filteredhash == tiphash
473 return self.filteredhash == tiphash
489
474
490 @classmethod
475 @classmethod
491 def fromfile(cls, repo):
476 def fromfile(cls, repo):
492 f = None
477 f = None
493 try:
478 try:
494 f = repo.cachevfs(cls._filename(repo))
479 f = repo.cachevfs(cls._filename(repo))
495 lineiter = iter(f)
480 lineiter = iter(f)
496 init_kwargs = cls._load_header(repo, lineiter)
481 init_kwargs = cls._load_header(repo, lineiter)
497 bcache = cls(
482 bcache = cls(
498 repo,
483 repo,
499 verify_node=True,
484 verify_node=True,
500 **init_kwargs,
485 **init_kwargs,
501 )
486 )
502 if not bcache.validfor(repo):
487 if not bcache.validfor(repo):
503 # invalidate the cache
488 # invalidate the cache
504 raise ValueError('tip differs')
489 raise ValueError('tip differs')
505 bcache._load_heads(repo, lineiter)
490 bcache._load_heads(repo, lineiter)
506 except (IOError, OSError):
491 except (IOError, OSError):
507 return None
492 return None
508
493
509 except Exception as inst:
494 except Exception as inst:
510 if repo.ui.debugflag:
495 if repo.ui.debugflag:
511 msg = b'invalid %s: %s\n'
496 msg = b'invalid %s: %s\n'
512 msg %= (
497 msg %= (
513 _branchcachedesc(repo),
498 _branchcachedesc(repo),
514 stringutil.forcebytestr(inst),
499 stringutil.forcebytestr(inst),
515 )
500 )
516 repo.ui.debug(msg)
501 repo.ui.debug(msg)
517 bcache = None
502 bcache = None
518
503
519 finally:
504 finally:
520 if f:
505 if f:
521 f.close()
506 f.close()
522
507
523 return bcache
508 return bcache
524
509
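Note that fromfile() funnels every failure mode (missing file, unparsable header, stale tip) into a None return, so callers never see a partially loaded cache. A plausible caller-side sketch, using the new_branch_cache() helper introduced further down in this change:

    cache = BranchCacheV2.fromfile(repo)
    if cache is None:
        # nothing usable on disk: start empty and let update()
        # repopulate the heads from the changelog
        cache = new_branch_cache(repo)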
525 @classmethod
510 @classmethod
526 def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
511 def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
527 """parse the head of a branchmap file
512 """parse the head of a branchmap file
528
513
529 return parameters to pass to a newly created class instance.
514 return parameters to pass to a newly created class instance.
530 """
515 """
531 cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
516 cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
532 last, lrev = cachekey[:2]
517 last, lrev = cachekey[:2]
533 last, lrev = bin(last), int(lrev)
518 last, lrev = bin(last), int(lrev)
534 filteredhash = None
519 filteredhash = None
535 if len(cachekey) > 2:
520 if len(cachekey) > 2:
536 filteredhash = bin(cachekey[2])
521 filteredhash = bin(cachekey[2])
537 return {
522 return {
538 "tipnode": last,
523 "tipnode": last,
539 "tiprev": lrev,
524 "tiprev": lrev,
540 "filteredhash": filteredhash,
525 "filteredhash": filteredhash,
541 }
526 }
542
527
543 def _load_heads(self, repo, lineiter):
528 def _load_heads(self, repo, lineiter):
544 """fully loads the branchcache by reading from the file using the line
529 """fully loads the branchcache by reading from the file using the line
545 iterator passed"""
530 iterator passed"""
546 for line in lineiter:
531 for line in lineiter:
547 line = line.rstrip(b'\n')
532 line = line.rstrip(b'\n')
548 if not line:
533 if not line:
549 continue
534 continue
550 node, state, label = line.split(b" ", 2)
535 node, state, label = line.split(b" ", 2)
551 if state not in b'oc':
536 if state not in b'oc':
552 raise ValueError('invalid branch state')
537 raise ValueError('invalid branch state')
553 label = encoding.tolocal(label.strip())
538 label = encoding.tolocal(label.strip())
554 node = bin(node)
539 node = bin(node)
555 self._entries.setdefault(label, []).append(node)
540 self._entries.setdefault(label, []).append(node)
556 if state == b'c':
541 if state == b'c':
557 self._closednodes.add(node)
542 self._closednodes.add(node)
558
543
559 @classmethod
544 @classmethod
560 def _filename(cls, repo):
545 def _filename(cls, repo):
561 """name of a branchcache file for a given repo or repoview"""
546 """name of a branchcache file for a given repo or repoview"""
562 filename = cls._base_filename
547 filename = cls._base_filename
548 assert filename is not None
563 if repo.filtername:
549 if repo.filtername:
564 filename = b'%s-%s' % (filename, repo.filtername)
550 filename = b'%s-%s' % (filename, repo.filtername)
565 return filename
551 return filename
566
552
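Each repoview filter thus gets its own file under .hg/cache, keyed by the filter name. A minimal restatement of the naming rule, using the standard "served" filter as an example:

    def cache_filename(base, filtername):
        # mirrors _filename() above: one on-disk file per repoview filter
        if filtername:
            return b'%s-%s' % (base, filtername)
        return base

    assert cache_filename(b"branch2", None) == b"branch2"
    assert cache_filename(b"branch2", b"served") == b"branch2-served"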
567 def inherit_for(self, repo):
553 def inherit_for(self, repo):
568 """return a deep copy of the branchcache object"""
554 """return a deep copy of the branchcache object"""
569 assert repo.filtername != self._filtername
555 assert repo.filtername != self._filtername
570 other = type(self)(
556 other = type(self)(
571 repo=repo,
557 repo=repo,
572 # we always do a shallow copy of self._entries, and the values are
558 # we always do a shallow copy of self._entries, and the values are
573 # always replaced, so there is no need to deepcopy as long as the
559 # always replaced, so there is no need to deepcopy as long as the
574 # above remains true.
560 # above remains true.
575 entries=self._entries,
561 entries=self._entries,
576 tipnode=self.tipnode,
562 tipnode=self.tipnode,
577 tiprev=self.tiprev,
563 tiprev=self.tiprev,
578 filteredhash=self.filteredhash,
564 filteredhash=self.filteredhash,
579 closednodes=set(self._closednodes),
565 closednodes=set(self._closednodes),
580 verify_node=self._verify_node,
566 verify_node=self._verify_node,
581 inherited=True,
567 inherited=True,
582 )
568 )
583 # also copy information about the current verification state
569 # also copy information about the current verification state
584 other._verifiedbranches = set(self._verifiedbranches)
570 other._verifiedbranches = set(self._verifiedbranches)
585 return other
571 return other
586
572
587 def sync_disk(self, repo):
573 def sync_disk(self, repo):
588 """synchronise the on disk file with the cache state
574 """synchronise the on disk file with the cache state
589
575
590 If new values specific to this filter level need to be written, the file
576 If new values specific to this filter level need to be written, the file
591 will be updated; if the state of the branchcache is inherited from a
577 will be updated; if the state of the branchcache is inherited from a
592 subset, any stale on-disk file will be deleted.
578 subset, any stale on-disk file will be deleted.
593
579
594 This method does nothing if there is nothing to do.
580 This method does nothing if there is nothing to do.
595 """
581 """
596 if self._state == STATE_DIRTY:
582 if self._state == STATE_DIRTY:
597 self.write(repo)
583 self.write(repo)
598 elif self._state == STATE_INHERITED:
584 elif self._state == STATE_INHERITED:
599 filename = self._filename(repo)
585 filename = self._filename(repo)
600 repo.cachevfs.tryunlink(filename)
586 repo.cachevfs.tryunlink(filename)
601
587
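Read together with the STATE_* constants above, sync_disk() completes a small three-state lifecycle. Summarizing it as comments (an interpretation of the code above, not documented API):

    # STATE_CLEAN:     in-memory cache matches disk; sync_disk() is a no-op.
    # STATE_INHERITED: entries were copied from a broader repoview by
    #                  inherit_for(); any file for this exact filter is
    #                  stale, so sync_disk() unlinks it instead of writing.
    # STATE_DIRTY:     update() changed the heads; sync_disk() calls
    #                  write(), which persists the cache and resets the
    #                  state to STATE_CLEAN.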
602 def write(self, repo):
588 def write(self, repo):
603 assert self._filtername == repo.filtername, (
589 assert self._filtername == repo.filtername, (
604 self._filtername,
590 self._filtername,
605 repo.filtername,
591 repo.filtername,
606 )
592 )
607 assert self._state == STATE_DIRTY, self._state
593 assert self._state == STATE_DIRTY, self._state
608 # This method should not be called during an open transaction
594 # This method should not be called during an open transaction
609 tr = repo.currenttransaction()
595 tr = repo.currenttransaction()
610 if not getattr(tr, 'finalized', True):
596 if not getattr(tr, 'finalized', True):
611 msg = "writing branchcache in the middle of a transaction"
597 msg = "writing branchcache in the middle of a transaction"
612 raise error.ProgrammingError(msg)
598 raise error.ProgrammingError(msg)
613 try:
599 try:
614 filename = self._filename(repo)
600 filename = self._filename(repo)
615 with repo.cachevfs(filename, b"w", atomictemp=True) as f:
601 with repo.cachevfs(filename, b"w", atomictemp=True) as f:
616 self._write_header(f)
602 self._write_header(f)
617 nodecount = self._write_heads(f)
603 nodecount = self._write_heads(f)
618 repo.ui.log(
604 repo.ui.log(
619 b'branchcache',
605 b'branchcache',
620 b'wrote %s with %d labels and %d nodes\n',
606 b'wrote %s with %d labels and %d nodes\n',
621 _branchcachedesc(repo),
607 _branchcachedesc(repo),
622 len(self._entries),
608 len(self._entries),
623 nodecount,
609 nodecount,
624 )
610 )
625 self._state = STATE_CLEAN
611 self._state = STATE_CLEAN
626 except (IOError, OSError, error.Abort) as inst:
612 except (IOError, OSError, error.Abort) as inst:
627 # Abort may be raised by read only opener, so log and continue
613 # Abort may be raised by read only opener, so log and continue
628 repo.ui.debug(
614 repo.ui.debug(
629 b"couldn't write branch cache: %s\n"
615 b"couldn't write branch cache: %s\n"
630 % stringutil.forcebytestr(inst)
616 % stringutil.forcebytestr(inst)
631 )
617 )
632
618
633 def _write_header(self, fp) -> None:
619 def _write_header(self, fp) -> None:
634 """write the branch cache header to a file"""
620 """write the branch cache header to a file"""
635 cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
621 cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
636 if self.filteredhash is not None:
622 if self.filteredhash is not None:
637 cachekey.append(hex(self.filteredhash))
623 cachekey.append(hex(self.filteredhash))
638 fp.write(b" ".join(cachekey) + b'\n')
624 fp.write(b" ".join(cachekey) + b'\n')
639
625
640 def _write_heads(self, fp) -> int:
626 def _write_heads(self, fp) -> int:
641 """write list of heads to a file
627 """write list of heads to a file
642
628
643 Return the number of heads written."""
629 Return the number of heads written."""
644 nodecount = 0
630 nodecount = 0
645 for label, nodes in sorted(self._entries.items()):
631 for label, nodes in sorted(self._entries.items()):
646 label = encoding.fromlocal(label)
632 label = encoding.fromlocal(label)
647 for node in nodes:
633 for node in nodes:
648 nodecount += 1
634 nodecount += 1
649 if node in self._closednodes:
635 if node in self._closednodes:
650 state = b'c'
636 state = b'c'
651 else:
637 else:
652 state = b'o'
638 state = b'o'
653 fp.write(b"%s %s %s\n" % (hex(node), state, label))
639 fp.write(b"%s %s %s\n" % (hex(node), state, label))
654 return nodecount
640 return nodecount
655
641
656 def _verifybranch(self, branch):
642 def _verifybranch(self, branch):
657 """verify head nodes for the given branch."""
643 """verify head nodes for the given branch."""
658 if not self._verify_node:
644 if not self._verify_node:
659 return
645 return
660 if branch not in self._entries or branch in self._verifiedbranches:
646 if branch not in self._entries or branch in self._verifiedbranches:
661 return
647 return
662 assert self._hasnode is not None
648 assert self._hasnode is not None
663 for n in self._entries[branch]:
649 for n in self._entries[branch]:
664 if not self._hasnode(n):
650 if not self._hasnode(n):
665 _unknownnode(n)
651 _unknownnode(n)
666
652
667 self._verifiedbranches.add(branch)
653 self._verifiedbranches.add(branch)
668
654
669 def _verifyall(self):
655 def _verifyall(self):
670 """verifies nodes of all the branches"""
656 """verifies nodes of all the branches"""
671 for b in self._entries.keys():
657 for b in self._entries.keys():
672 if b not in self._verifiedbranches:
658 if b not in self._verifiedbranches:
673 self._verifybranch(b)
659 self._verifybranch(b)
674
660
675 def __getitem__(self, key):
661 def __getitem__(self, key):
676 self._verifybranch(key)
662 self._verifybranch(key)
677 return super().__getitem__(key)
663 return super().__getitem__(key)
678
664
679 def __contains__(self, key):
665 def __contains__(self, key):
680 self._verifybranch(key)
666 self._verifybranch(key)
681 return super().__contains__(key)
667 return super().__contains__(key)
682
668
683 def iteritems(self):
669 def iteritems(self):
684 self._verifyall()
670 self._verifyall()
685 return super().iteritems()
671 return super().iteritems()
686
672
687 items = iteritems
673 items = iteritems
688
674
689 def iterheads(self):
675 def iterheads(self):
690 """returns all the heads"""
676 """returns all the heads"""
691 self._verifyall()
677 self._verifyall()
692 return super().iterheads()
678 return super().iterheads()
693
679
694 def hasbranch(self, label):
680 def hasbranch(self, label):
695 """checks whether a branch of this name exists or not"""
681 """checks whether a branch of this name exists or not"""
696 self._verifybranch(label)
682 self._verifybranch(label)
697 return super().hasbranch(label)
683 return super().hasbranch(label)
698
684
699 def branchheads(self, branch, closed=False):
685 def branchheads(self, branch, closed=False):
700 self._verifybranch(branch)
686 self._verifybranch(branch)
701 return super().branchheads(branch, closed=closed)
687 return super().branchheads(branch, closed=closed)
702
688
703 def update(self, repo, revgen):
689 def update(self, repo, revgen):
704 assert self._filtername == repo.filtername, (
690 assert self._filtername == repo.filtername, (
705 self._filtername,
691 self._filtername,
706 repo.filtername,
692 repo.filtername,
707 )
693 )
708 cl = repo.changelog
694 cl = repo.changelog
709 max_rev = super().update(repo, revgen)
695 max_rev = super().update(repo, revgen)
710 # new tip revision which we found after iterating items from new
696 # new tip revision which we found after iterating items from new
711 # branches
697 # branches
712 if max_rev is not None and max_rev > self.tiprev:
698 if max_rev is not None and max_rev > self.tiprev:
713 self.tiprev = max_rev
699 self.tiprev = max_rev
714 self.tipnode = cl.node(max_rev)
700 self.tipnode = cl.node(max_rev)
715
701
716 if not self.validfor(repo):
702 if not self.validfor(repo):
717 # old cache key is now invalid for the repo, but we've just updated
703 # old cache key is now invalid for the repo, but we've just updated
718 # the cache and we assume it's valid, so let's make the cache key
704 # the cache and we assume it's valid, so let's make the cache key
719 # valid as well by recomputing it from the cached data
705 # valid as well by recomputing it from the cached data
720 self.tipnode = repo.nullid
706 self.tipnode = repo.nullid
721 self.tiprev = nullrev
707 self.tiprev = nullrev
722 for heads in self.iterheads():
708 for heads in self.iterheads():
723 if not heads:
709 if not heads:
724 # all revisions on a branch are obsolete
710 # all revisions on a branch are obsolete
725 continue
711 continue
726 # note: tiprev is not necessarily the tip revision of repo,
712 # note: tiprev is not necessarily the tip revision of repo,
727 # because the tip could be obsolete (i.e. not a head)
713 # because the tip could be obsolete (i.e. not a head)
728 tiprev = max(cl.rev(node) for node in heads)
714 tiprev = max(cl.rev(node) for node in heads)
729 if tiprev > self.tiprev:
715 if tiprev > self.tiprev:
730 self.tipnode = cl.node(tiprev)
716 self.tipnode = cl.node(tiprev)
731 self.tiprev = tiprev
717 self.tiprev = tiprev
732 self.filteredhash = scmutil.filteredhash(
718 self.filteredhash = scmutil.filteredhash(
733 repo, self.tiprev, needobsolete=True
719 repo, self.tiprev, needobsolete=True
734 )
720 )
735 self._state = STATE_DIRTY
721 self._state = STATE_DIRTY
736 tr = repo.currenttransaction()
722 tr = repo.currenttransaction()
737 if getattr(tr, 'finalized', True):
723 if getattr(tr, 'finalized', True):
738 # Avoid premature writing.
724 # Avoid premature writing.
739 #
725 #
740 # (The cache warming setup by localrepo will update the file later.)
726 # (The cache warming setup by localrepo will update the file later.)
741 self.write(repo)
727 self.write(repo)
742
728
743
729
730 def branch_cache_from_file(repo) -> Optional[_LocalBranchCache]:
731 """Build a branch cache from on-disk data if possible"""
732 return BranchCacheV2.fromfile(repo)
733
734
735 def new_branch_cache(repo, *args, **kwargs):
736 """Build a new branch cache from argument"""
737 return BranchCacheV2(repo, *args, **kwargs)
738
739
740 class BranchCacheV2(_LocalBranchCache):
741 """a branch cache using version 2 of the format on disk
742
743 The cache is serialized on disk in the following format:
744
745 <tip hex node> <tip rev number> [optional filtered repo hex hash]
746 <branch head hex node> <open/closed state> <branch name>
747 <branch head hex node> <open/closed state> <branch name>
748 ...
749
750 The first line is used to check if the cache is still valid. If the
751 branch cache is for a filtered repo view, an optional third hash is
752 included that hashes the hashes of all filtered and obsolete revisions.
753
754 The open/closed state is represented by a single letter 'o' or 'c'.
755 This field can be used to avoid changelog reads when determining if a
756 branch head closes a branch or not.
757 """
758
759 _base_filename = b"branch2"
760
761
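To make the documented layout concrete, here is a minimal parser for the v2 format, mirroring _load_header() and _load_heads() above but without their validation and error handling (a sketch, not the module API):

    def parse_branch2(lines):
        it = iter(lines)
        cachekey = next(it).rstrip(b'\n').split(b' ', 2)
        tipnode, tiprev = cachekey[0], int(cachekey[1])
        filteredhash = cachekey[2] if len(cachekey) > 2 else None
        heads, closed = {}, set()
        for line in it:
            line = line.rstrip(b'\n')
            if not line:
                continue
            node, state, label = line.split(b' ', 2)
            heads.setdefault(label, []).append(node)
            if state == b'c':
                closed.add(node)
        return tipnode, tiprev, filteredhash, heads, closed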
744 class remotebranchcache(_BaseBranchCache):
762 class remotebranchcache(_BaseBranchCache):
745 """Branchmap info for a remote connection, should not write locally"""
763 """Branchmap info for a remote connection, should not write locally"""
746
764
747 def __init__(
765 def __init__(
748 self,
766 self,
749 repo: "localrepo.localrepository",
767 repo: "localrepo.localrepository",
750 entries: Union[
768 entries: Union[
751 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
769 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
752 ] = (),
770 ] = (),
753 closednodes: Optional[Set[bytes]] = None,
771 closednodes: Optional[Set[bytes]] = None,
754 ) -> None:
772 ) -> None:
755 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
773 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
756
774
757
775
758 # Revision branch info cache
776 # Revision branch info cache
759
777
760 _rbcversion = b'-v1'
778 _rbcversion = b'-v1'
761 _rbcnames = b'rbc-names' + _rbcversion
779 _rbcnames = b'rbc-names' + _rbcversion
762 _rbcrevs = b'rbc-revs' + _rbcversion
780 _rbcrevs = b'rbc-revs' + _rbcversion
763 # [4 byte hash prefix][4 byte branch name number with sign bit indicating a closed branch]
781 # [4 byte hash prefix][4 byte branch name number with sign bit indicating a closed branch]
764 _rbcrecfmt = b'>4sI'
782 _rbcrecfmt = b'>4sI'
765 _rbcrecsize = calcsize(_rbcrecfmt)
783 _rbcrecsize = calcsize(_rbcrecfmt)
766 _rbcmininc = 64 * _rbcrecsize
784 _rbcmininc = 64 * _rbcrecsize
767 _rbcnodelen = 4
785 _rbcnodelen = 4
768 _rbcbranchidxmask = 0x7FFFFFFF
786 _rbcbranchidxmask = 0x7FFFFFFF
769 _rbccloseflag = 0x80000000
787 _rbccloseflag = 0x80000000
770
788
771
789
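Each rbc-revs record is therefore exactly eight bytes. A round-trip with the stdlib struct module, using a made-up node prefix and branch index:

    from struct import calcsize, pack, unpack

    assert calcsize(b'>4sI') == 8            # == _rbcrecsize
    node_prefix = b'\xde\xad\xbe\xef'        # hypothetical first 4 node bytes
    branchidx = 5                            # index into the rbc-names list
    record = pack(b'>4sI', node_prefix, branchidx | 0x80000000)  # closed

    prefix, field = unpack(b'>4sI', record)
    assert prefix == node_prefix
    assert bool(field & 0x80000000)          # _rbccloseflag: close commit
    assert field & 0x7FFFFFFF == 5           # _rbcbranchidxmask: name index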
772 class rbcrevs:
790 class rbcrevs:
773 """a byte string consisting of an immutable prefix followed by a mutable suffix"""
791 """a byte string consisting of an immutable prefix followed by a mutable suffix"""
774
792
775 def __init__(self, revs):
793 def __init__(self, revs):
776 self._prefix = revs
794 self._prefix = revs
777 self._rest = bytearray()
795 self._rest = bytearray()
778
796
779 def __len__(self):
797 def __len__(self):
780 return len(self._prefix) + len(self._rest)
798 return len(self._prefix) + len(self._rest)
781
799
782 def unpack_record(self, rbcrevidx):
800 def unpack_record(self, rbcrevidx):
783 if rbcrevidx < len(self._prefix):
801 if rbcrevidx < len(self._prefix):
784 return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
802 return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
785 else:
803 else:
786 return unpack_from(
804 return unpack_from(
787 _rbcrecfmt,
805 _rbcrecfmt,
788 util.buffer(self._rest),
806 util.buffer(self._rest),
789 rbcrevidx - len(self._prefix),
807 rbcrevidx - len(self._prefix),
790 )
808 )
791
809
792 def make_mutable(self):
810 def make_mutable(self):
793 if len(self._prefix) > 0:
811 if len(self._prefix) > 0:
794 entirety = bytearray()
812 entirety = bytearray()
795 entirety[:] = self._prefix
813 entirety[:] = self._prefix
796 entirety.extend(self._rest)
814 entirety.extend(self._rest)
797 self._rest = entirety
815 self._rest = entirety
798 self._prefix = bytearray()
816 self._prefix = bytearray()
799
817
800 def truncate(self, pos):
818 def truncate(self, pos):
801 self.make_mutable()
819 self.make_mutable()
802 del self._rest[pos:]
820 del self._rest[pos:]
803
821
804 def pack_into(self, rbcrevidx, node, branchidx):
822 def pack_into(self, rbcrevidx, node, branchidx):
805 if rbcrevidx < len(self._prefix):
823 if rbcrevidx < len(self._prefix):
806 self.make_mutable()
824 self.make_mutable()
807 buf = self._rest
825 buf = self._rest
808 start_offset = rbcrevidx - len(self._prefix)
826 start_offset = rbcrevidx - len(self._prefix)
809 end_offset = start_offset + _rbcrecsize
827 end_offset = start_offset + _rbcrecsize
810
828
811 if len(self._rest) < end_offset:
829 if len(self._rest) < end_offset:
812 # bytearray doesn't allocate extra space at least in Python 3.7.
830 # bytearray doesn't allocate extra space at least in Python 3.7.
813 # When multiple changesets are added in a row, precise resize would
831 # When multiple changesets are added in a row, precise resize would
814 # result in quadratic complexity. Overallocate to compensate by
832 # result in quadratic complexity. Overallocate to compensate by
815 # using the classic doubling technique for dynamic arrays instead.
833 # using the classic doubling technique for dynamic arrays instead.
816 # If there was a gap in the map before, less space will be reserved.
834 # If there was a gap in the map before, less space will be reserved.
817 self._rest.extend(b'\0' * end_offset)
835 self._rest.extend(b'\0' * end_offset)
818 return pack_into(
836 return pack_into(
819 _rbcrecfmt,
837 _rbcrecfmt,
820 buf,
838 buf,
821 start_offset,
839 start_offset,
822 node,
840 node,
823 branchidx,
841 branchidx,
824 )
842 )
825
843
826 def extend(self, extension):
844 def extend(self, extension):
827 return self._rest.extend(extension)
845 return self._rest.extend(extension)
828
846
829 def slice(self, begin, end):
847 def slice(self, begin, end):
830 if begin < len(self._prefix):
848 if begin < len(self._prefix):
831 acc = bytearray()
849 acc = bytearray()
832 acc[:] = self._prefix[begin:end]
850 acc[:] = self._prefix[begin:end]
833 acc.extend(
851 acc.extend(
834 self._rest[begin - len(self._prefix) : end - len(self._prefix)]
852 self._rest[begin - len(self._prefix) : end - len(self._prefix)]
835 )
853 )
836 return acc
854 return acc
837 return self._rest[begin - len(self._prefix) : end - len(self._prefix)]
855 return self._rest[begin - len(self._prefix) : end - len(self._prefix)]
838
856
839
857
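The point of the prefix/suffix split is that the immutable prefix can be a read-only (possibly mmap-backed) buffer; the first write that lands inside it triggers make_mutable(), which copies everything into a bytearray. A short usage sketch of the class exactly as defined above:

    revs = rbcrevs(b'\x00' * 16)               # two empty 8-byte records
    assert len(revs) == 16
    # writing into the second record falls inside the read-only prefix,
    # so pack_into() first performs the make_mutable() copy
    revs.pack_into(8, b'\xca\xfe\xba\xbe', 1)
    node, field = revs.unpack_record(8)
    assert node == b'\xca\xfe\xba\xbe' and field == 1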
840 class revbranchcache:
858 class revbranchcache:
841 """Persistent cache, mapping from revision number to branch name and close.
859 """Persistent cache, mapping from revision number to branch name and close.
842 This is a low level cache, independent of filtering.
860 This is a low level cache, independent of filtering.
843
861
844 Branch names are stored in rbc-names in internal encoding separated by 0.
862 Branch names are stored in rbc-names in internal encoding separated by 0.
845 rbc-names is append-only, and each branch name is only stored once and will
863 rbc-names is append-only, and each branch name is only stored once and will
846 thus have a unique index.
864 thus have a unique index.
847
865
848 The branch info for each revision is stored in rbc-revs as constant size
866 The branch info for each revision is stored in rbc-revs as constant size
849 records. The whole file is read into memory, but it is only 'parsed' on
867 records. The whole file is read into memory, but it is only 'parsed' on
850 demand. The file is usually append-only but will be truncated if repo
868 demand. The file is usually append-only but will be truncated if repo
851 modification is detected.
869 modification is detected.
852 The record for each revision contains the first 4 bytes of the
870 The record for each revision contains the first 4 bytes of the
853 corresponding node hash, and the record is only used if it still matches.
871 corresponding node hash, and the record is only used if it still matches.
854 Even a completely trashed rbc-revs file will thus still give the right result
872 Even a completely trashed rbc-revs file will thus still give the right result
855 while converging towards full recovery ... assuming no incorrectly matching
873 while converging towards full recovery ... assuming no incorrectly matching
856 node hashes.
874 node hashes.
857 The record also contains 4 bytes where 31 bits contain the index of the
875 The record also contains 4 bytes where 31 bits contain the index of the
858 branch and the last bit indicates that it is a branch-close commit.
876 branch and the last bit indicates that it is a branch-close commit.
859 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
877 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
860 and will grow with it but be 1/8th of its size.
878 and will grow with it but be 1/8th of its size.
861 """
879 """
862
880
863 def __init__(self, repo, readonly=True):
881 def __init__(self, repo, readonly=True):
864 assert repo.filtername is None
882 assert repo.filtername is None
865 self._repo = repo
883 self._repo = repo
866 self._names = [] # branch names in local encoding with static index
884 self._names = [] # branch names in local encoding with static index
867 self._rbcrevs = rbcrevs(bytearray())
885 self._rbcrevs = rbcrevs(bytearray())
868 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
886 self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
869 try:
887 try:
870 bndata = repo.cachevfs.read(_rbcnames)
888 bndata = repo.cachevfs.read(_rbcnames)
871 self._rbcsnameslen = len(bndata) # for verification before writing
889 self._rbcsnameslen = len(bndata) # for verification before writing
872 if bndata:
890 if bndata:
873 self._names = [
891 self._names = [
874 encoding.tolocal(bn) for bn in bndata.split(b'\0')
892 encoding.tolocal(bn) for bn in bndata.split(b'\0')
875 ]
893 ]
876 except (IOError, OSError):
894 except (IOError, OSError):
877 if readonly:
895 if readonly:
878 # don't try to use cache - fall back to the slow path
896 # don't try to use cache - fall back to the slow path
879 self.branchinfo = self._branchinfo
897 self.branchinfo = self._branchinfo
880
898
881 if self._names:
899 if self._names:
882 try:
900 try:
883 if repo.ui.configbool(b'storage', b'revbranchcache.mmap'):
901 if repo.ui.configbool(b'storage', b'revbranchcache.mmap'):
884 with repo.cachevfs(_rbcrevs) as fp:
902 with repo.cachevfs(_rbcrevs) as fp:
885 data = util.buffer(util.mmapread(fp))
903 data = util.buffer(util.mmapread(fp))
886 else:
904 else:
887 data = repo.cachevfs.read(_rbcrevs)
905 data = repo.cachevfs.read(_rbcrevs)
888 self._rbcrevs = rbcrevs(data)
906 self._rbcrevs = rbcrevs(data)
889 except (IOError, OSError) as inst:
907 except (IOError, OSError) as inst:
890 repo.ui.debug(
908 repo.ui.debug(
891 b"couldn't read revision branch cache: %s\n"
909 b"couldn't read revision branch cache: %s\n"
892 % stringutil.forcebytestr(inst)
910 % stringutil.forcebytestr(inst)
893 )
911 )
894 # remember number of good records on disk
912 # remember number of good records on disk
895 self._rbcrevslen = min(
913 self._rbcrevslen = min(
896 len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
914 len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
897 )
915 )
898 if self._rbcrevslen == 0:
916 if self._rbcrevslen == 0:
899 self._names = []
917 self._names = []
900 self._rbcnamescount = len(self._names) # number of names read at
918 self._rbcnamescount = len(self._names) # number of names read at
901 # _rbcsnameslen
919 # _rbcsnameslen
902
920
903 def _clear(self):
921 def _clear(self):
904 self._rbcsnameslen = 0
922 self._rbcsnameslen = 0
905 del self._names[:]
923 del self._names[:]
906 self._rbcnamescount = 0
924 self._rbcnamescount = 0
907 self._rbcrevslen = len(self._repo.changelog)
925 self._rbcrevslen = len(self._repo.changelog)
908 self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
926 self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
909 util.clearcachedproperty(self, b'_namesreverse')
927 util.clearcachedproperty(self, b'_namesreverse')
910
928
911 @util.propertycache
929 @util.propertycache
912 def _namesreverse(self):
930 def _namesreverse(self):
913 return {b: r for r, b in enumerate(self._names)}
931 return {b: r for r, b in enumerate(self._names)}
914
932
915 def branchinfo(self, rev):
933 def branchinfo(self, rev):
916 """Return branch name and close flag for rev, using and updating
934 """Return branch name and close flag for rev, using and updating
917 persistent cache."""
935 persistent cache."""
918 changelog = self._repo.changelog
936 changelog = self._repo.changelog
919 rbcrevidx = rev * _rbcrecsize
937 rbcrevidx = rev * _rbcrecsize
920
938
921 # avoid negative index, changelog.read(nullrev) is fast without cache
939 # avoid negative index, changelog.read(nullrev) is fast without cache
922 if rev == nullrev:
940 if rev == nullrev:
923 return changelog.branchinfo(rev)
941 return changelog.branchinfo(rev)
924
942
925 # if requested rev isn't allocated, grow and cache the rev info
943 # if requested rev isn't allocated, grow and cache the rev info
926 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
944 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
927 return self._branchinfo(rev)
945 return self._branchinfo(rev)
928
946
929 # fast path: extract data from cache, use it if node is matching
947 # fast path: extract data from cache, use it if node is matching
930 reponode = changelog.node(rev)[:_rbcnodelen]
948 reponode = changelog.node(rev)[:_rbcnodelen]
931 cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
949 cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
932 close = bool(branchidx & _rbccloseflag)
950 close = bool(branchidx & _rbccloseflag)
933 if close:
951 if close:
934 branchidx &= _rbcbranchidxmask
952 branchidx &= _rbcbranchidxmask
935 if cachenode == b'\0\0\0\0':
953 if cachenode == b'\0\0\0\0':
936 pass
954 pass
937 elif cachenode == reponode:
955 elif cachenode == reponode:
938 try:
956 try:
939 return self._names[branchidx], close
957 return self._names[branchidx], close
940 except IndexError:
958 except IndexError:
941 # recover from invalid reference to unknown branch
959 # recover from invalid reference to unknown branch
942 self._repo.ui.debug(
960 self._repo.ui.debug(
943 b"referenced branch names not found"
961 b"referenced branch names not found"
944 b" - rebuilding revision branch cache from scratch\n"
962 b" - rebuilding revision branch cache from scratch\n"
945 )
963 )
946 self._clear()
964 self._clear()
947 else:
965 else:
948 # rev/node map has changed, invalidate the cache from here up
966 # rev/node map has changed, invalidate the cache from here up
949 self._repo.ui.debug(
967 self._repo.ui.debug(
950 b"history modification detected - truncating "
968 b"history modification detected - truncating "
951 b"revision branch cache to revision %d\n" % rev
969 b"revision branch cache to revision %d\n" % rev
952 )
970 )
953 truncate = rbcrevidx + _rbcrecsize
971 truncate = rbcrevidx + _rbcrecsize
954 self._rbcrevs.truncate(truncate)
972 self._rbcrevs.truncate(truncate)
955 self._rbcrevslen = min(self._rbcrevslen, truncate)
973 self._rbcrevslen = min(self._rbcrevslen, truncate)
956
974
957 # fall back to slow path and make sure it will be written to disk
975 # fall back to slow path and make sure it will be written to disk
958 return self._branchinfo(rev)
976 return self._branchinfo(rev)
959
977
960 def _branchinfo(self, rev):
978 def _branchinfo(self, rev):
961 """Retrieve branch info from changelog and update _rbcrevs"""
979 """Retrieve branch info from changelog and update _rbcrevs"""
962 changelog = self._repo.changelog
980 changelog = self._repo.changelog
963 b, close = changelog.branchinfo(rev)
981 b, close = changelog.branchinfo(rev)
964 if b in self._namesreverse:
982 if b in self._namesreverse:
965 branchidx = self._namesreverse[b]
983 branchidx = self._namesreverse[b]
966 else:
984 else:
967 branchidx = len(self._names)
985 branchidx = len(self._names)
968 self._names.append(b)
986 self._names.append(b)
969 self._namesreverse[b] = branchidx
987 self._namesreverse[b] = branchidx
970 reponode = changelog.node(rev)
988 reponode = changelog.node(rev)
971 if close:
989 if close:
972 branchidx |= _rbccloseflag
990 branchidx |= _rbccloseflag
973 self._setcachedata(rev, reponode, branchidx)
991 self._setcachedata(rev, reponode, branchidx)
974 return b, close
992 return b, close
975
993
976 def setdata(self, rev, changelogrevision):
994 def setdata(self, rev, changelogrevision):
977 """add new data information to the cache"""
995 """add new data information to the cache"""
978 branch, close = changelogrevision.branchinfo
996 branch, close = changelogrevision.branchinfo
979
997
980 if branch in self._namesreverse:
998 if branch in self._namesreverse:
981 branchidx = self._namesreverse[branch]
999 branchidx = self._namesreverse[branch]
982 else:
1000 else:
983 branchidx = len(self._names)
1001 branchidx = len(self._names)
984 self._names.append(branch)
1002 self._names.append(branch)
985 self._namesreverse[branch] = branchidx
1003 self._namesreverse[branch] = branchidx
986 if close:
1004 if close:
987 branchidx |= _rbccloseflag
1005 branchidx |= _rbccloseflag
988 self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
1006 self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
989 # If no cache data were readable (file does not exist, bad permissions, etc.)
1007 # If no cache data were readable (file does not exist, bad permissions, etc.)
990 # the cache was bypassing itself by setting:
1008 # the cache was bypassing itself by setting:
991 #
1009 #
992 # self.branchinfo = self._branchinfo
1010 # self.branchinfo = self._branchinfo
993 #
1011 #
994 # Since we now have data in the cache, we need to drop this bypassing.
1012 # Since we now have data in the cache, we need to drop this bypassing.
995 if 'branchinfo' in vars(self):
1013 if 'branchinfo' in vars(self):
996 del self.branchinfo
1014 del self.branchinfo
997
1015
998 def _setcachedata(self, rev, node, branchidx):
1016 def _setcachedata(self, rev, node, branchidx):
999 """Writes the node's branch data to the in-memory cache data."""
1017 """Writes the node's branch data to the in-memory cache data."""
1000 if rev == nullrev:
1018 if rev == nullrev:
1001 return
1019 return
1002 rbcrevidx = rev * _rbcrecsize
1020 rbcrevidx = rev * _rbcrecsize
1003 self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
1021 self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
1004 self._rbcrevslen = min(self._rbcrevslen, rev)
1022 self._rbcrevslen = min(self._rbcrevslen, rev)
1005
1023
1006 tr = self._repo.currenttransaction()
1024 tr = self._repo.currenttransaction()
1007 if tr:
1025 if tr:
1008 tr.addfinalize(b'write-revbranchcache', self.write)
1026 tr.addfinalize(b'write-revbranchcache', self.write)
1009
1027
1010 def write(self, tr=None):
1028 def write(self, tr=None):
1011 """Save branch cache if it is dirty."""
1029 """Save branch cache if it is dirty."""
1012 repo = self._repo
1030 repo = self._repo
1013 wlock = None
1031 wlock = None
1014 step = b''
1032 step = b''
1015 try:
1033 try:
1016 # write the new names
1034 # write the new names
1017 if self._rbcnamescount < len(self._names):
1035 if self._rbcnamescount < len(self._names):
1018 wlock = repo.wlock(wait=False)
1036 wlock = repo.wlock(wait=False)
1019 step = b' names'
1037 step = b' names'
1020 self._writenames(repo)
1038 self._writenames(repo)
1021
1039
1022 # write the new revs
1040 # write the new revs
1023 start = self._rbcrevslen * _rbcrecsize
1041 start = self._rbcrevslen * _rbcrecsize
1024 if start != len(self._rbcrevs):
1042 if start != len(self._rbcrevs):
1025 step = b''
1043 step = b''
1026 if wlock is None:
1044 if wlock is None:
1027 wlock = repo.wlock(wait=False)
1045 wlock = repo.wlock(wait=False)
1028 self._writerevs(repo, start)
1046 self._writerevs(repo, start)
1029
1047
1030 except (IOError, OSError, error.Abort, error.LockError) as inst:
1048 except (IOError, OSError, error.Abort, error.LockError) as inst:
1031 repo.ui.debug(
1049 repo.ui.debug(
1032 b"couldn't write revision branch cache%s: %s\n"
1050 b"couldn't write revision branch cache%s: %s\n"
1033 % (step, stringutil.forcebytestr(inst))
1051 % (step, stringutil.forcebytestr(inst))
1034 )
1052 )
1035 finally:
1053 finally:
1036 if wlock is not None:
1054 if wlock is not None:
1037 wlock.release()
1055 wlock.release()
1038
1056
1039 def _writenames(self, repo):
1057 def _writenames(self, repo):
1040 """write the new branch names to revbranchcache"""
1058 """write the new branch names to revbranchcache"""
1041 if self._rbcnamescount != 0:
1059 if self._rbcnamescount != 0:
1042 f = repo.cachevfs.open(_rbcnames, b'ab')
1060 f = repo.cachevfs.open(_rbcnames, b'ab')
1043 if f.tell() == self._rbcsnameslen:
1061 if f.tell() == self._rbcsnameslen:
1044 f.write(b'\0')
1062 f.write(b'\0')
1045 else:
1063 else:
1046 f.close()
1064 f.close()
1047 repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
1065 repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
1048 self._rbcnamescount = 0
1066 self._rbcnamescount = 0
1049 self._rbcrevslen = 0
1067 self._rbcrevslen = 0
1050 if self._rbcnamescount == 0:
1068 if self._rbcnamescount == 0:
1051 # before rewriting names, make sure references are removed
1069 # before rewriting names, make sure references are removed
1052 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
1070 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
1053 f = repo.cachevfs.open(_rbcnames, b'wb')
1071 f = repo.cachevfs.open(_rbcnames, b'wb')
1054 f.write(
1072 f.write(
1055 b'\0'.join(
1073 b'\0'.join(
1056 encoding.fromlocal(b)
1074 encoding.fromlocal(b)
1057 for b in self._names[self._rbcnamescount :]
1075 for b in self._names[self._rbcnamescount :]
1058 )
1076 )
1059 )
1077 )
1060 self._rbcsnameslen = f.tell()
1078 self._rbcsnameslen = f.tell()
1061 f.close()
1079 f.close()
1062 self._rbcnamescount = len(self._names)
1080 self._rbcnamescount = len(self._names)
1063
1081
1064 def _writerevs(self, repo, start):
1082 def _writerevs(self, repo, start):
1065 """write the new revs to revbranchcache"""
1083 """write the new revs to revbranchcache"""
1066 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
1084 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
1067 with repo.cachevfs.open(_rbcrevs, b'ab') as f:
1085 with repo.cachevfs.open(_rbcrevs, b'ab') as f:
1068 if f.tell() != start:
1086 if f.tell() != start:
1069 repo.ui.debug(
1087 repo.ui.debug(
1070 b"truncating cache/%s to %d\n" % (_rbcrevs, start)
1088 b"truncating cache/%s to %d\n" % (_rbcrevs, start)
1071 )
1089 )
1072 f.seek(start)
1090 f.seek(start)
1073 if f.tell() != start:
1091 if f.tell() != start:
1074 start = 0
1092 start = 0
1075 f.seek(start)
1093 f.seek(start)
1076 f.truncate()
1094 f.truncate()
1077 end = revs * _rbcrecsize
1095 end = revs * _rbcrecsize
1078 f.write(self._rbcrevs.slice(start, end))
1096 f.write(self._rbcrevs.slice(start, end))
1079 self._rbcrevslen = revs
1097 self._rbcrevslen = revs