perf: add a `--clear-fnode-cache-rev` argument to perf::tags...
marmoute
r51832:e4c4adb6 stable
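
This changeset renames the destructive `--clear-fnode-cache` flag of `perf::tags` to `--clear-fnode-cache-all` and adds a companion `--clear-fnode-cache-rev REVS` flag: instead of dropping the whole on-disk file node cache between benchmark runs, it invalidates only the cache records of the revisions in the given revset. Example invocations are shown after the perftags definition below.
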
@@ -1,4497 +1,4530 @@
# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic:

      If the benchmark has been running for <time> seconds, and we have
      performed <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once; useful for testing
  (default: off)
'''
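# An illustrative configuration exercising the options documented above; the
# values are examples chosen for this note, not defaults:
#
#   [perf]
#   all-timing = yes
#   presleep = 0
#   pre-run = 2
#   run-limits = 5.0-50, 15.0-5
#   stub = no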

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time

import mercurial.revlog
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    profiling = None

try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, *args, **kwargs)


def identity(a):
    return a


try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()


def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")


if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator


try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len


class noop:
    """dummy context manager"""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


NOOPCTX = noop()


def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm


def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()


@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))

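# timeone() is the measurement primitive used by _timer() below: the list it
# yields receives a single (wall-clock, user-CPU, system-CPU) tuple, computed
# from util.timer() and the first two fields of os.times(), once the with
# block exits.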

# list of stop conditions (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
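# Worked example of the defaults above: a fast benchmark keeps running until
# 3.0 seconds have elapsed and 100 iterations are done; a slow one that cannot
# reach 100 iterations stops once 10.0 seconds have elapsed and at least 3
# iterations are done.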


@contextlib.contextmanager
def noop_context():
    yield


def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)


def formatone(fm, timings, title=None, result=None, displayall=False):
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)

# utilities for historical portability


def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )


def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking removal of an attribute, which would
    break assumptions of the performance measurement in the future.

    This function returns an object used to (1) assign a new value, and
    (2) restore the original value to the attribute.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful for
    examining an attribute that isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()


# utilities to examine internal API changes


def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )


def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')


def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')


def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")


# utilities to clear cache


def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)


def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')


# perf commands

@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()


@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()


@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between calls.

    By default, only the status of tracked files is requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                 False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()


@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()


def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None


@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()


def _default_clear_on_disk_tags_cache(repo):
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))


def _default_clear_on_disk_tags_fnodes_cache(repo):
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)


+def _default_forget_fnodes(repo, revs):
+    """function used by the perf extension to prune some entries from the
+    fnodes cache"""
+    from mercurial import tags
+
+    missing_1 = b'\xff' * 4
+    missing_2 = b'\xff' * 20
+    cache = tags.hgtagsfnodescache(repo.unfiltered())
+    for r in revs:
+        cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
+    cache.write()
+
+
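# Illustrative note: the sentinels above work because the on-disk fnodes cache
# is an array of fixed-size records, one per revision, each
# tags._fnodesrecsize bytes long: a 4-byte changeset-node prefix followed by a
# 20-byte filenode. Overwriting a record with 0xff bytes invalidates it, since
# the prefix no longer matches that revision's node, so the next tags
# computation has to recompute the file node for that revision.
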
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
-            b'clear-fnode-cache',
+            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
+        (
+            b'',
+            b'clear-fnode-cache-rev',
+            [],
+            b'clear on disk file node cache (DESTRUCTIVE),',
+            b'REVS',
+        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situations

    Options marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, they do not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
-    clear_fnode = opts[b'clear_fnode_cache']
+    clear_fnode = opts[b'clear_fnode_cache_all']
+
+    clear_fnode_revs = opts[b'clear_fnode_cache_rev']

    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    clear_fnodes_fn = getattr(
        tags,
        "clear_cache_fnodes",
        _default_clear_on_disk_tags_fnodes_cache,
    )
+    clear_fnodes_rev_fn = getattr(
+        tags,
+        "forget_fnodes",
+        _default_forget_fnodes,
+    )
+
+    clear_revs = None
+    if clear_fnode_revs:
+        clear_revs = scmutil.revrange(repo, clear_fnode_revs)

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
+        elif clear_revs is not None:
+            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()

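
# Example invocations of perf::tags with the cache-clearing options above
# (the revset is illustrative):
#
#   $ hg perf::tags --clear-revlogs
#   $ hg perf::tags --clear-fnode-cache-all
#   $ hg perf::tags --clear-fnode-cache-rev '1000:1100'
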
960 @command(b'perf::ancestors|perfancestors', formatteropts)
993 @command(b'perf::ancestors|perfancestors', formatteropts)
961 def perfancestors(ui, repo, **opts):
994 def perfancestors(ui, repo, **opts):
962 opts = _byteskwargs(opts)
995 opts = _byteskwargs(opts)
963 timer, fm = gettimer(ui, opts)
996 timer, fm = gettimer(ui, opts)
964 heads = repo.changelog.headrevs()
997 heads = repo.changelog.headrevs()
965
998
966 def d():
999 def d():
967 for a in repo.changelog.ancestors(heads):
1000 for a in repo.changelog.ancestors(heads):
968 pass
1001 pass
969
1002
970 timer(d)
1003 timer(d)
971 fm.end()
1004 fm.end()
972
1005
973
1006
974 @command(b'perf::ancestorset|perfancestorset', formatteropts)
1007 @command(b'perf::ancestorset|perfancestorset', formatteropts)
975 def perfancestorset(ui, repo, revset, **opts):
1008 def perfancestorset(ui, repo, revset, **opts):
976 opts = _byteskwargs(opts)
1009 opts = _byteskwargs(opts)
977 timer, fm = gettimer(ui, opts)
1010 timer, fm = gettimer(ui, opts)
978 revs = repo.revs(revset)
1011 revs = repo.revs(revset)
979 heads = repo.changelog.headrevs()
1012 heads = repo.changelog.headrevs()
980
1013
981 def d():
1014 def d():
982 s = repo.changelog.ancestors(heads)
1015 s = repo.changelog.ancestors(heads)
983 for rev in revs:
1016 for rev in revs:
984 rev in s
1017 rev in s
985
1018
986 timer(d)
1019 timer(d)
987 fm.end()
1020 fm.end()
988
1021
989
1022
990 @command(
1023 @command(
991 b'perf::delta-find',
1024 b'perf::delta-find',
992 revlogopts + formatteropts,
1025 revlogopts + formatteropts,
993 b'-c|-m|FILE REV',
1026 b'-c|-m|FILE REV',
994 )
1027 )
995 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
1028 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
996 """benchmark the process of finding a valid delta for a revlog revision
1029 """benchmark the process of finding a valid delta for a revlog revision
997
1030
998 When a revlog receives a new revision (e.g. from a commit, or from an
1031 When a revlog receives a new revision (e.g. from a commit, or from an
999 incoming bundle), it searches for a suitable delta-base to produce a delta.
1032 incoming bundle), it searches for a suitable delta-base to produce a delta.
1000 This perf command measures how much time we spend in this process. It
1033 This perf command measures how much time we spend in this process. It
1001 operates on an already stored revision.
1034 operates on an already stored revision.
1002
1035
1003 See `hg help debug-delta-find` for another related command.
1036 See `hg help debug-delta-find` for another related command.
1004 """
1037 """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()


@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
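    # Example invocation (the path is a placeholder for any reachable peer,
    # local or remote):
    #
    #   $ hg perf::discovery ../some-clone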
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()


@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
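    # Example invocation; --clear-revlogs additionally refreshes the changelog
    # and manifest between runs:
    #
    #   $ hg perf::bookmarks --clear-revlogs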
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()


@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
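    # Example invocation (the revset is a placeholder; `none-v2` matches the
    # "none" compression restriction documented above):
    #
    #   $ hg perf::bundle -r '::tip' -t none-v2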
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()


@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
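    # Example invocation; the bundle path is a placeholder, created here with
    # `hg bundle` (any existing bundle file works):
    #
    #   $ hg bundle --all --type none-v2 /tmp/all.hg
    #   $ hg perf::bundleread /tmp/all.hg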
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, 'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, 'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
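    # Example invocation (the revset is a placeholder; without --rev the
    # whole repository is used, as documented above):
    #
    #   $ hg perf::changegroupchangelog --cgversion 02 --rev '::tip'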
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()


@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()


@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
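    # Example invocations for the three modes benchmarked by this command:
    #
    #   $ hg perf::dirstate              # load from scratch
    #   $ hg perf::dirstate --iteration  # full iteration
    #   $ hg perf::dirstate --contains   # many `nf in dirstate` checks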
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file paths that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")

    def setup():
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds

    def setup():
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    with repo.wlock():
        timer(d, setup=setup)
    fm.end()


def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)


@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()


@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
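    # Example invocation (revisions are placeholders; they should name the
    # two sides of a plausible merge):
    #
    #   $ hg perf::mergecopies -r tip --from .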
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()


@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
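    # Example invocation (the two revisions are placeholders; copies are
    # traced between the pair):
    #
    #   $ hg perf::pathcopies 0 tip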
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()


@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
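    # Example invocation; --full also accounts for re-reading the phase data
    # from disk:
    #
    #   $ hg perf::phases --full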
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()


@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
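    # Example invocation; with no destination the configured default path is
    # analysed:
    #
    #   $ hg perf::phasesremote
    #   $ hg perf::phasesremote default-push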
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.items():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()


@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
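    # Example invocations (revisions are placeholders; with -m the argument
    # is a manifest revision or node rather than a changeset):
    #
    #   $ hg perf::manifest tip
    #   $ hg perf::manifest -m 0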
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()


@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()


@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operations related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()


@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (
            b'',
            b'no-lookup',
            None,
            b'do not perform revision lookup post creation',
        ),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Examples of useful sets to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
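    # Example invocation exercising several of the sets listed above (--rev
    # may be repeated):
    #
    #   $ hg perf::index --rev '-10:' --rev tip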
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()


@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the number and order of revisions we
    look up can vary. Examples of useful sets to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
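    # Example invocation (--rev is required here and may be repeated):
    #
    #   $ hg perf::nodemap --rev tip --rev 0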
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()


@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()


def _find_stream_generator(version):
    """find the proper generator function for this stream version"""
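    # Illustrative use (a sketch; `repo` stands for any local repository
    # object):
    #
    #   generate = _find_stream_generator(b'v2')
    #   for chunk in generate(repo):
    #       pass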
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate(repo):
            # generatev1 takes only the repo (no filtering arguments)
            entries, bytes, data = generatev1(repo)
            return data

        available[b'v1'] = generate
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)


@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3" or "latest" (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""
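    # Example invocation:
    #
    #   $ hg perf::stream-locked-section --stream-version v2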

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    result_holder = [None]

    def setupone():
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration of the initialisation
        result_holder[0] = generate(repo)

    timer(runone, setup=setupone, title=b"load")
    fm.end()


@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2" or "latest" (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""
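    # Example invocation:
    #
    #   $ hg perf::stream-generate --stream-version v2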

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration of the initialisation
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()


@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This includes the creation of the repository
    """
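    # Example invocation; the bundle file is a placeholder, produced here with
    # the `debugcreatestreamclonebundle` debug command:
    #
    #   $ hg debugcreatestreamclonebundle /tmp/stream.hg
    #   $ hg perf::stream-consume /tmp/stream.hg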
2085 # try except to appease check code
2118 # try except to appease check code
2086 msg = b"mercurial too old, missing necessary module: %s"
2119 msg = b"mercurial too old, missing necessary module: %s"
2087 try:
2120 try:
2088 from mercurial import bundle2
2121 from mercurial import bundle2
2089 except ImportError as exc:
2122 except ImportError as exc:
2090 msg %= _bytestr(exc)
2123 msg %= _bytestr(exc)
2091 raise error.Abort(msg)
2124 raise error.Abort(msg)
2092 try:
2125 try:
2093 from mercurial import exchange
2126 from mercurial import exchange
2094 except ImportError as exc:
2127 except ImportError as exc:
2095 msg %= _bytestr(exc)
2128 msg %= _bytestr(exc)
2096 raise error.Abort(msg)
2129 raise error.Abort(msg)
2097 try:
2130 try:
2098 from mercurial import hg
2131 from mercurial import hg
2099 except ImportError as exc:
2132 except ImportError as exc:
2100 msg %= _bytestr(exc)
2133 msg %= _bytestr(exc)
2101 raise error.Abort(msg)
2134 raise error.Abort(msg)
2102 try:
2135 try:
2103 from mercurial import localrepo
2136 from mercurial import localrepo
2104 except ImportError as exc:
2137 except ImportError as exc:
2105 msg %= _bytestr(exc)
2138 msg %= _bytestr(exc)
2106 raise error.Abort(msg)
2139 raise error.Abort(msg)
2107
2140
2108 opts = _byteskwargs(opts)
2141 opts = _byteskwargs(opts)
2109 timer, fm = gettimer(ui, opts)
2142 timer, fm = gettimer(ui, opts)
2110
2143
2111 # deletion of the generator may trigger some cleanup that we do not want to
2144 # deletion of the generator may trigger some cleanup that we do not want to
2112 # measure
2145 # measure
2113 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2146 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2114 raise error.Abort("not a readable file: %s" % filename)
2147 raise error.Abort("not a readable file: %s" % filename)
2115
2148
2116 run_variables = [None, None]
2149 run_variables = [None, None]
2117
2150
2118 @contextlib.contextmanager
2151 @contextlib.contextmanager
2119 def context():
2152 def context():
2120 with open(filename, mode='rb') as bundle:
2153 with open(filename, mode='rb') as bundle:
2121 with tempfile.TemporaryDirectory() as tmp_dir:
2154 with tempfile.TemporaryDirectory() as tmp_dir:
2122 tmp_dir = fsencode(tmp_dir)
2155 tmp_dir = fsencode(tmp_dir)
2123 run_variables[0] = bundle
2156 run_variables[0] = bundle
2124 run_variables[1] = tmp_dir
2157 run_variables[1] = tmp_dir
2125 yield
2158 yield
2126 run_variables[0] = None
2159 run_variables[0] = None
2127 run_variables[1] = None
2160 run_variables[1] = None
2128
2161
2129 def runone():
2162 def runone():
2130 bundle = run_variables[0]
2163 bundle = run_variables[0]
2131 tmp_dir = run_variables[1]
2164 tmp_dir = run_variables[1]
2132 # only pass ui when no srcrepo
2165 # only pass ui when no srcrepo
2133 localrepo.createrepository(
2166 localrepo.createrepository(
2134 repo.ui, tmp_dir, requirements=repo.requirements
2167 repo.ui, tmp_dir, requirements=repo.requirements
2135 )
2168 )
2136 target = hg.repository(repo.ui, tmp_dir)
2169 target = hg.repository(repo.ui, tmp_dir)
2137 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2170 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2138 # stream v1
2171 # stream v1
2139 if util.safehasattr(gen, 'apply'):
2172 if util.safehasattr(gen, 'apply'):
2140 gen.apply(target)
2173 gen.apply(target)
2141 else:
2174 else:
2142 with target.transaction(b"perf::stream-consume") as tr:
2175 with target.transaction(b"perf::stream-consume") as tr:
2143 bundle2.applybundle(
2176 bundle2.applybundle(
2144 target,
2177 target,
2145 gen,
2178 gen,
2146 tr,
2179 tr,
2147 source=b'unbundle',
2180 source=b'unbundle',
2148 url=filename,
2181 url=filename,
2149 )
2182 )
2150
2183
2151 timer(runone, context=context, title=b"consume")
2184 timer(runone, context=context, title=b"consume")
2152 fm.end()
2185 fm.end()
2153
2186
2154
2187
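# Usage sketch (illustrative; the exact bundlespec is an assumption and may
# vary across Mercurial versions, see `hg help bundlespec`): produce a bundle
# first, then benchmark its application into a fresh repository.
#
#   $ hg bundle --all --type none-v2 stream.hg
#   $ hg perf::stream-consume stream.hg
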
2155 @command(b'perf::parents|perfparents', formatteropts)
2188 @command(b'perf::parents|perfparents', formatteropts)
2156 def perfparents(ui, repo, **opts):
2189 def perfparents(ui, repo, **opts):
2157 """benchmark the time necessary to fetch one changeset's parents.
2190 """benchmark the time necessary to fetch one changeset's parents.
2158
2191
2159 The fetch is done using the `node identifier`, traversing all object layers
2192 The fetch is done using the `node identifier`, traversing all object layers
2160 from the repository object. The first N revisions will be used for this
2193 from the repository object. The first N revisions will be used for this
2161 benchmark. N is controlled by the ``perf.parentscount`` config option
2194 benchmark. N is controlled by the ``perf.parentscount`` config option
2162 (default: 1000).
2195 (default: 1000).
2163 """
2196 """
2164 opts = _byteskwargs(opts)
2197 opts = _byteskwargs(opts)
2165 timer, fm = gettimer(ui, opts)
2198 timer, fm = gettimer(ui, opts)
2166 # control the number of commits perfparents iterates over
2199 # control the number of commits perfparents iterates over
2167 # experimental config: perf.parentscount
2200 # experimental config: perf.parentscount
2168 count = getint(ui, b"perf", b"parentscount", 1000)
2201 count = getint(ui, b"perf", b"parentscount", 1000)
2169 if len(repo.changelog) < count:
2202 if len(repo.changelog) < count:
2170 raise error.Abort(b"repo needs %d commits for this test" % count)
2203 raise error.Abort(b"repo needs %d commits for this test" % count)
2171 repo = repo.unfiltered()
2204 repo = repo.unfiltered()
2172 nl = [repo.changelog.node(i) for i in _xrange(count)]
2205 nl = [repo.changelog.node(i) for i in _xrange(count)]
2173
2206
2174 def d():
2207 def d():
2175 for n in nl:
2208 for n in nl:
2176 repo.changelog.parents(n)
2209 repo.changelog.parents(n)
2177
2210
2178 timer(d)
2211 timer(d)
2179 fm.end()
2212 fm.end()
2180
2213
2181
2214
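# Minimal stand-alone sketch of the operation timed above, assuming `repo` is
# already unfiltered; only the changelog APIs used by perfparents appear here.
def _example_parents_of_first_revs(repo, count=1000):
    cl = repo.changelog
    nodes = [cl.node(i) for i in _xrange(count)]
    return [cl.parents(n) for n in nodes]
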
2182 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2215 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2183 def perfctxfiles(ui, repo, x, **opts):
2216 def perfctxfiles(ui, repo, x, **opts):
2184 opts = _byteskwargs(opts)
2217 opts = _byteskwargs(opts)
2185 x = int(x)
2218 x = int(x)
2186 timer, fm = gettimer(ui, opts)
2219 timer, fm = gettimer(ui, opts)
2187
2220
2188 def d():
2221 def d():
2189 len(repo[x].files())
2222 len(repo[x].files())
2190
2223
2191 timer(d)
2224 timer(d)
2192 fm.end()
2225 fm.end()
2193
2226
2194
2227
2195 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2228 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2196 def perfrawfiles(ui, repo, x, **opts):
2229 def perfrawfiles(ui, repo, x, **opts):
2197 opts = _byteskwargs(opts)
2230 opts = _byteskwargs(opts)
2198 x = int(x)
2231 x = int(x)
2199 timer, fm = gettimer(ui, opts)
2232 timer, fm = gettimer(ui, opts)
2200 cl = repo.changelog
2233 cl = repo.changelog
2201
2234
2202 def d():
2235 def d():
2203 len(cl.read(x)[3])
2236 len(cl.read(x)[3])
2204
2237
2205 timer(d)
2238 timer(d)
2206 fm.end()
2239 fm.end()
2207
2240
2208
2241
2209 @command(b'perf::lookup|perflookup', formatteropts)
2242 @command(b'perf::lookup|perflookup', formatteropts)
2210 def perflookup(ui, repo, rev, **opts):
2243 def perflookup(ui, repo, rev, **opts):
2211 opts = _byteskwargs(opts)
2244 opts = _byteskwargs(opts)
2212 timer, fm = gettimer(ui, opts)
2245 timer, fm = gettimer(ui, opts)
2213 timer(lambda: len(repo.lookup(rev)))
2246 timer(lambda: len(repo.lookup(rev)))
2214 fm.end()
2247 fm.end()
2215
2248
2216
2249
2217 @command(
2250 @command(
2218 b'perf::linelogedits|perflinelogedits',
2251 b'perf::linelogedits|perflinelogedits',
2219 [
2252 [
2220 (b'n', b'edits', 10000, b'number of edits'),
2253 (b'n', b'edits', 10000, b'number of edits'),
2221 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2254 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2222 ],
2255 ],
2223 norepo=True,
2256 norepo=True,
2224 )
2257 )
2225 def perflinelogedits(ui, **opts):
2258 def perflinelogedits(ui, **opts):
2226 from mercurial import linelog
2259 from mercurial import linelog
2227
2260
2228 opts = _byteskwargs(opts)
2261 opts = _byteskwargs(opts)
2229
2262
2230 edits = opts[b'edits']
2263 edits = opts[b'edits']
2231 maxhunklines = opts[b'max_hunk_lines']
2264 maxhunklines = opts[b'max_hunk_lines']
2232
2265
2233 maxb1 = 100000
2266 maxb1 = 100000
2234 random.seed(0)
2267 random.seed(0)
2235 randint = random.randint
2268 randint = random.randint
2236 currentlines = 0
2269 currentlines = 0
2237 arglist = []
2270 arglist = []
2238 for rev in _xrange(edits):
2271 for rev in _xrange(edits):
2239 a1 = randint(0, currentlines)
2272 a1 = randint(0, currentlines)
2240 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2273 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2241 b1 = randint(0, maxb1)
2274 b1 = randint(0, maxb1)
2242 b2 = randint(b1, b1 + maxhunklines)
2275 b2 = randint(b1, b1 + maxhunklines)
2243 currentlines += (b2 - b1) - (a2 - a1)
2276 currentlines += (b2 - b1) - (a2 - a1)
2244 arglist.append((rev, a1, a2, b1, b2))
2277 arglist.append((rev, a1, a2, b1, b2))
2245
2278
2246 def d():
2279 def d():
2247 ll = linelog.linelog()
2280 ll = linelog.linelog()
2248 for args in arglist:
2281 for args in arglist:
2249 ll.replacelines(*args)
2282 ll.replacelines(*args)
2250
2283
2251 timer, fm = gettimer(ui, opts)
2284 timer, fm = gettimer(ui, opts)
2252 timer(d)
2285 timer(d)
2253 fm.end()
2286 fm.end()
2254
2287
2255
2288
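# Tiny usage sketch of the linelog API exercised above; the edit values are
# made up for illustration. replacelines(rev, a1, a2, b1, b2) replaces the
# line range [a1, a2) with lines [b1, b2) as of revision `rev`.
def _example_linelog_edits():
    from mercurial import linelog
    ll = linelog.linelog()
    ll.replacelines(1, 0, 0, 0, 3)  # rev 1: insert three lines at the top
    ll.replacelines(2, 1, 2, 1, 2)  # rev 2: rewrite the middle line
    return ll
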
2256 @command(b'perf::revrange|perfrevrange', formatteropts)
2289 @command(b'perf::revrange|perfrevrange', formatteropts)
2257 def perfrevrange(ui, repo, *specs, **opts):
2290 def perfrevrange(ui, repo, *specs, **opts):
2258 opts = _byteskwargs(opts)
2291 opts = _byteskwargs(opts)
2259 timer, fm = gettimer(ui, opts)
2292 timer, fm = gettimer(ui, opts)
2260 revrange = scmutil.revrange
2293 revrange = scmutil.revrange
2261 timer(lambda: len(revrange(repo, specs)))
2294 timer(lambda: len(revrange(repo, specs)))
2262 fm.end()
2295 fm.end()
2263
2296
2264
2297
2265 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2298 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2266 def perfnodelookup(ui, repo, rev, **opts):
2299 def perfnodelookup(ui, repo, rev, **opts):
2267 opts = _byteskwargs(opts)
2300 opts = _byteskwargs(opts)
2268 timer, fm = gettimer(ui, opts)
2301 timer, fm = gettimer(ui, opts)
2269 import mercurial.revlog
2302 import mercurial.revlog
2270
2303
2271 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2304 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2272 n = scmutil.revsingle(repo, rev).node()
2305 n = scmutil.revsingle(repo, rev).node()
2273
2306
2274 try:
2307 try:
2275 cl = revlog(getsvfs(repo), radix=b"00changelog")
2308 cl = revlog(getsvfs(repo), radix=b"00changelog")
2276 except TypeError:
2309 except TypeError:
2277 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2310 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2278
2311
2279 def d():
2312 def d():
2280 cl.rev(n)
2313 cl.rev(n)
2281 clearcaches(cl)
2314 clearcaches(cl)
2282
2315
2283 timer(d)
2316 timer(d)
2284 fm.end()
2317 fm.end()
2285
2318
2286
2319
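# Version-compat note for the try/except above: newer Mercurial revlogs take a
# `radix` argument while older ones take `indexfile`. A reusable helper doing
# the same dance might look like this (illustrative sketch only).
def _example_open_changelog(repo):
    try:
        return revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:  # hg <= 5.8
        return revlog(getsvfs(repo), indexfile=b"00changelog.i")
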
2287 @command(
2320 @command(
2288 b'perf::log|perflog',
2321 b'perf::log|perflog',
2289 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2322 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2290 )
2323 )
2291 def perflog(ui, repo, rev=None, **opts):
2324 def perflog(ui, repo, rev=None, **opts):
2292 opts = _byteskwargs(opts)
2325 opts = _byteskwargs(opts)
2293 if rev is None:
2326 if rev is None:
2294 rev = []
2327 rev = []
2295 timer, fm = gettimer(ui, opts)
2328 timer, fm = gettimer(ui, opts)
2296 ui.pushbuffer()
2329 ui.pushbuffer()
2297 timer(
2330 timer(
2298 lambda: commands.log(
2331 lambda: commands.log(
2299 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2332 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2300 )
2333 )
2301 )
2334 )
2302 ui.popbuffer()
2335 ui.popbuffer()
2303 fm.end()
2336 fm.end()
2304
2337
2305
2338
2306 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2339 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2307 def perfmoonwalk(ui, repo, **opts):
2340 def perfmoonwalk(ui, repo, **opts):
2308 """benchmark walking the changelog backwards
2341 """benchmark walking the changelog backwards
2309
2342
2310 This also loads the changelog data for each revision in the changelog.
2343 This also loads the changelog data for each revision in the changelog.
2311 """
2344 """
2312 opts = _byteskwargs(opts)
2345 opts = _byteskwargs(opts)
2313 timer, fm = gettimer(ui, opts)
2346 timer, fm = gettimer(ui, opts)
2314
2347
2315 def moonwalk():
2348 def moonwalk():
2316 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2349 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2317 ctx = repo[i]
2350 ctx = repo[i]
2318 ctx.branch() # read changelog data (in addition to the index)
2351 ctx.branch() # read changelog data (in addition to the index)
2319
2352
2320 timer(moonwalk)
2353 timer(moonwalk)
2321 fm.end()
2354 fm.end()
2322
2355
2323
2356
2324 @command(
2357 @command(
2325 b'perf::templating|perftemplating',
2358 b'perf::templating|perftemplating',
2326 [
2359 [
2327 (b'r', b'rev', [], b'revisions to run the template on'),
2360 (b'r', b'rev', [], b'revisions to run the template on'),
2328 ]
2361 ]
2329 + formatteropts,
2362 + formatteropts,
2330 )
2363 )
2331 def perftemplating(ui, repo, testedtemplate=None, **opts):
2364 def perftemplating(ui, repo, testedtemplate=None, **opts):
2332 """test the rendering time of a given template"""
2365 """test the rendering time of a given template"""
2333 if makelogtemplater is None:
2366 if makelogtemplater is None:
2334 raise error.Abort(
2367 raise error.Abort(
2335 b"perftemplating not available with this Mercurial",
2368 b"perftemplating not available with this Mercurial",
2336 hint=b"use 4.3 or later",
2369 hint=b"use 4.3 or later",
2337 )
2370 )
2338
2371
2339 opts = _byteskwargs(opts)
2372 opts = _byteskwargs(opts)
2340
2373
2341 nullui = ui.copy()
2374 nullui = ui.copy()
2342 nullui.fout = open(os.devnull, 'wb')
2375 nullui.fout = open(os.devnull, 'wb')
2343 nullui.disablepager()
2376 nullui.disablepager()
2344 revs = opts.get(b'rev')
2377 revs = opts.get(b'rev')
2345 if not revs:
2378 if not revs:
2346 revs = [b'all()']
2379 revs = [b'all()']
2347 revs = list(scmutil.revrange(repo, revs))
2380 revs = list(scmutil.revrange(repo, revs))
2348
2381
2349 defaulttemplate = (
2382 defaulttemplate = (
2350 b'{date|shortdate} [{rev}:{node|short}]'
2383 b'{date|shortdate} [{rev}:{node|short}]'
2351 b' {author|person}: {desc|firstline}\n'
2384 b' {author|person}: {desc|firstline}\n'
2352 )
2385 )
2353 if testedtemplate is None:
2386 if testedtemplate is None:
2354 testedtemplate = defaulttemplate
2387 testedtemplate = defaulttemplate
2355 displayer = makelogtemplater(nullui, repo, testedtemplate)
2388 displayer = makelogtemplater(nullui, repo, testedtemplate)
2356
2389
2357 def format():
2390 def format():
2358 for r in revs:
2391 for r in revs:
2359 ctx = repo[r]
2392 ctx = repo[r]
2360 displayer.show(ctx)
2393 displayer.show(ctx)
2361 displayer.flush(ctx)
2394 displayer.flush(ctx)
2362
2395
2363 timer, fm = gettimer(ui, opts)
2396 timer, fm = gettimer(ui, opts)
2364 timer(format)
2397 timer(format)
2365 fm.end()
2398 fm.end()
2366
2399
2367
2400
2368 def _displaystats(ui, opts, entries, data):
2401 def _displaystats(ui, opts, entries, data):
2369 # use a second formatter because the data are quite different, not sure
2402 # use a second formatter because the data are quite different, not sure
2370 # how it flies with the templater.
2403 # how it flies with the templater.
2371 fm = ui.formatter(b'perf-stats', opts)
2404 fm = ui.formatter(b'perf-stats', opts)
2372 for key, title in entries:
2405 for key, title in entries:
2373 values = data[key]
2406 values = data[key]
2374 nbvalues = len(values)  # percentile indices refer to the sorted value list
2407 nbvalues = len(values)  # percentile indices refer to the sorted value list
2375 values.sort()
2408 values.sort()
2376 stats = {
2409 stats = {
2377 'key': key,
2410 'key': key,
2378 'title': title,
2411 'title': title,
2379 'nbitems': len(values),
2412 'nbitems': len(values),
2380 'min': values[0][0],
2413 'min': values[0][0],
2381 '10%': values[(nbvalues * 10) // 100][0],
2414 '10%': values[(nbvalues * 10) // 100][0],
2382 '25%': values[(nbvalues * 25) // 100][0],
2415 '25%': values[(nbvalues * 25) // 100][0],
2383 '50%': values[(nbvalues * 50) // 100][0],
2416 '50%': values[(nbvalues * 50) // 100][0],
2384 '75%': values[(nbvalues * 75) // 100][0],
2417 '75%': values[(nbvalues * 75) // 100][0],
2385 '80%': values[(nbvalues * 80) // 100][0],
2418 '80%': values[(nbvalues * 80) // 100][0],
2386 '85%': values[(nbvalues * 85) // 100][0],
2419 '85%': values[(nbvalues * 85) // 100][0],
2387 '90%': values[(nbvalues * 90) // 100][0],
2420 '90%': values[(nbvalues * 90) // 100][0],
2388 '95%': values[(nbvalues * 95) // 100][0],
2421 '95%': values[(nbvalues * 95) // 100][0],
2389 '99%': values[(nbvalues * 99) // 100][0],
2422 '99%': values[(nbvalues * 99) // 100][0],
2390 'max': values[-1][0],
2423 'max': values[-1][0],
2391 }
2424 }
2392 fm.startitem()
2425 fm.startitem()
2393 fm.data(**stats)
2426 fm.data(**stats)
2394 # make node pretty for the human output
2427 # make node pretty for the human output
2395 fm.plain('### %s (%d items)\n' % (title, len(values)))
2428 fm.plain('### %s (%d items)\n' % (title, len(values)))
2396 lines = [
2429 lines = [
2397 'min',
2430 'min',
2398 '10%',
2431 '10%',
2399 '25%',
2432 '25%',
2400 '50%',
2433 '50%',
2401 '75%',
2434 '75%',
2402 '80%',
2435 '80%',
2403 '85%',
2436 '85%',
2404 '90%',
2437 '90%',
2405 '95%',
2438 '95%',
2406 '99%',
2439 '99%',
2407 'max',
2440 'max',
2408 ]
2441 ]
2409 for l in lines:
2442 for l in lines:
2410 fm.plain('%s: %s\n' % (l, stats[l]))
2443 fm.plain('%s: %s\n' % (l, stats[l]))
2411 fm.end()
2444 fm.end()
2412
2445
2413
2446
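# Shape note for the `data` argument of _displaystats (matching how callers in
# this file build `alldata`): each key maps to a list of tuples whose first
# element is the measured value, e.g.
#
#   alldata = {
#       'nbrevs': [(12, base_hex, parent_hex), (3, base_hex, parent_hex)],
#       'nbmissingfiles': [(7, base_hex, parent_hex)],
#   }
#   _displaystats(ui, opts, [('nbrevs', 'number of revisions covered')], alldata)
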
2414 @command(
2447 @command(
2415 b'perf::helper-mergecopies|perfhelper-mergecopies',
2448 b'perf::helper-mergecopies|perfhelper-mergecopies',
2416 formatteropts
2449 formatteropts
2417 + [
2450 + [
2418 (b'r', b'revs', [], b'restrict search to these revisions'),
2451 (b'r', b'revs', [], b'restrict search to these revisions'),
2419 (b'', b'timing', False, b'provides extra data (costly)'),
2452 (b'', b'timing', False, b'provides extra data (costly)'),
2420 (b'', b'stats', False, b'provides statistic about the measured data'),
2453 (b'', b'stats', False, b'provides statistic about the measured data'),
2421 ],
2454 ],
2422 )
2455 )
2423 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2456 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2424 """find statistics about potential parameters for `perfmergecopies`
2457 """find statistics about potential parameters for `perfmergecopies`
2425
2458
2426 This command finds (base, p1, p2) triplets relevant for copytracing
2459 This command finds (base, p1, p2) triplets relevant for copytracing
2427 benchmarking in the context of a merge. It reports values for some of the
2460 benchmarking in the context of a merge. It reports values for some of the
2428 parameters that impact merge copy tracing time during merge.
2461 parameters that impact merge copy tracing time during merge.
2429
2462
2430 If `--timing` is set, rename detection is run and the associated timing
2463 If `--timing` is set, rename detection is run and the associated timing
2431 will be reported. The extra details come at the cost of slower command
2464 will be reported. The extra details come at the cost of slower command
2432 execution.
2465 execution.
2433
2466
2434 Since rename detection is only run once, other factors might easily
2467 Since rename detection is only run once, other factors might easily
2435 affect the precision of the timing. However, it should give a good
2468 affect the precision of the timing. However, it should give a good
2436 approximation of which revision triplets are very costly.
2469 approximation of which revision triplets are very costly.
2437 """
2470 """
2438 opts = _byteskwargs(opts)
2471 opts = _byteskwargs(opts)
2439 fm = ui.formatter(b'perf', opts)
2472 fm = ui.formatter(b'perf', opts)
2440 dotiming = opts[b'timing']
2473 dotiming = opts[b'timing']
2441 dostats = opts[b'stats']
2474 dostats = opts[b'stats']
2442
2475
2443 output_template = [
2476 output_template = [
2444 ("base", "%(base)12s"),
2477 ("base", "%(base)12s"),
2445 ("p1", "%(p1.node)12s"),
2478 ("p1", "%(p1.node)12s"),
2446 ("p2", "%(p2.node)12s"),
2479 ("p2", "%(p2.node)12s"),
2447 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2480 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2448 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2481 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2449 ("p1.renames", "%(p1.renamedfiles)12d"),
2482 ("p1.renames", "%(p1.renamedfiles)12d"),
2450 ("p1.time", "%(p1.time)12.3f"),
2483 ("p1.time", "%(p1.time)12.3f"),
2451 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2484 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2452 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2485 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2453 ("p2.renames", "%(p2.renamedfiles)12d"),
2486 ("p2.renames", "%(p2.renamedfiles)12d"),
2454 ("p2.time", "%(p2.time)12.3f"),
2487 ("p2.time", "%(p2.time)12.3f"),
2455 ("renames", "%(nbrenamedfiles)12d"),
2488 ("renames", "%(nbrenamedfiles)12d"),
2456 ("total.time", "%(time)12.3f"),
2489 ("total.time", "%(time)12.3f"),
2457 ]
2490 ]
2458 if not dotiming:
2491 if not dotiming:
2459 output_template = [
2492 output_template = [
2460 i
2493 i
2461 for i in output_template
2494 for i in output_template
2462 if not ('time' in i[0] or 'renames' in i[0])
2495 if not ('time' in i[0] or 'renames' in i[0])
2463 ]
2496 ]
2464 header_names = [h for (h, v) in output_template]
2497 header_names = [h for (h, v) in output_template]
2465 output = ' '.join([v for (h, v) in output_template]) + '\n'
2498 output = ' '.join([v for (h, v) in output_template]) + '\n'
2466 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2499 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2467 fm.plain(header % tuple(header_names))
2500 fm.plain(header % tuple(header_names))
2468
2501
2469 if not revs:
2502 if not revs:
2470 revs = ['all()']
2503 revs = ['all()']
2471 revs = scmutil.revrange(repo, revs)
2504 revs = scmutil.revrange(repo, revs)
2472
2505
2473 if dostats:
2506 if dostats:
2474 alldata = {
2507 alldata = {
2475 'nbrevs': [],
2508 'nbrevs': [],
2476 'nbmissingfiles': [],
2509 'nbmissingfiles': [],
2477 }
2510 }
2478 if dotiming:
2511 if dotiming:
2479 alldata['parentnbrenames'] = []
2512 alldata['parentnbrenames'] = []
2480 alldata['totalnbrenames'] = []
2513 alldata['totalnbrenames'] = []
2481 alldata['parenttime'] = []
2514 alldata['parenttime'] = []
2482 alldata['totaltime'] = []
2515 alldata['totaltime'] = []
2483
2516
2484 roi = repo.revs('merge() and %ld', revs)
2517 roi = repo.revs('merge() and %ld', revs)
2485 for r in roi:
2518 for r in roi:
2486 ctx = repo[r]
2519 ctx = repo[r]
2487 p1 = ctx.p1()
2520 p1 = ctx.p1()
2488 p2 = ctx.p2()
2521 p2 = ctx.p2()
2489 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2522 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2490 for b in bases:
2523 for b in bases:
2491 b = repo[b]
2524 b = repo[b]
2492 p1missing = copies._computeforwardmissing(b, p1)
2525 p1missing = copies._computeforwardmissing(b, p1)
2493 p2missing = copies._computeforwardmissing(b, p2)
2526 p2missing = copies._computeforwardmissing(b, p2)
2494 data = {
2527 data = {
2495 b'base': b.hex(),
2528 b'base': b.hex(),
2496 b'p1.node': p1.hex(),
2529 b'p1.node': p1.hex(),
2497 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2530 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2498 b'p1.nbmissingfiles': len(p1missing),
2531 b'p1.nbmissingfiles': len(p1missing),
2499 b'p2.node': p2.hex(),
2532 b'p2.node': p2.hex(),
2500 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2533 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2501 b'p2.nbmissingfiles': len(p2missing),
2534 b'p2.nbmissingfiles': len(p2missing),
2502 }
2535 }
2503 if dostats:
2536 if dostats:
2504 if p1missing:
2537 if p1missing:
2505 alldata['nbrevs'].append(
2538 alldata['nbrevs'].append(
2506 (data['p1.nbrevs'], b.hex(), p1.hex())
2539 (data['p1.nbrevs'], b.hex(), p1.hex())
2507 )
2540 )
2508 alldata['nbmissingfiles'].append(
2541 alldata['nbmissingfiles'].append(
2509 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2542 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2510 )
2543 )
2511 if p2missing:
2544 if p2missing:
2512 alldata['nbrevs'].append(
2545 alldata['nbrevs'].append(
2513 (data['p2.nbrevs'], b.hex(), p2.hex())
2546 (data['p2.nbrevs'], b.hex(), p2.hex())
2514 )
2547 )
2515 alldata['nbmissingfiles'].append(
2548 alldata['nbmissingfiles'].append(
2516 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2549 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2517 )
2550 )
2518 if dotiming:
2551 if dotiming:
2519 begin = util.timer()
2552 begin = util.timer()
2520 mergedata = copies.mergecopies(repo, p1, p2, b)
2553 mergedata = copies.mergecopies(repo, p1, p2, b)
2521 end = util.timer()
2554 end = util.timer()
2522 # not very stable timing since we did only one run
2555 # not very stable timing since we did only one run
2523 data['time'] = end - begin
2556 data['time'] = end - begin
2524 # mergedata contains five dicts: "copy", "movewithdir",
2557 # mergedata contains five dicts: "copy", "movewithdir",
2525 # "diverge", "renamedelete" and "dirmove".
2558 # "diverge", "renamedelete" and "dirmove".
2526 # The first 4 are about renamed files, so let's count them.
2559 # The first 4 are about renamed files, so let's count them.
2527 renames = len(mergedata[0])
2560 renames = len(mergedata[0])
2528 renames += len(mergedata[1])
2561 renames += len(mergedata[1])
2529 renames += len(mergedata[2])
2562 renames += len(mergedata[2])
2530 renames += len(mergedata[3])
2563 renames += len(mergedata[3])
2531 data['nbrenamedfiles'] = renames
2564 data['nbrenamedfiles'] = renames
2532 begin = util.timer()
2565 begin = util.timer()
2533 p1renames = copies.pathcopies(b, p1)
2566 p1renames = copies.pathcopies(b, p1)
2534 end = util.timer()
2567 end = util.timer()
2535 data['p1.time'] = end - begin
2568 data['p1.time'] = end - begin
2536 begin = util.timer()
2569 begin = util.timer()
2537 p2renames = copies.pathcopies(b, p2)
2570 p2renames = copies.pathcopies(b, p2)
2538 end = util.timer()
2571 end = util.timer()
2539 data['p2.time'] = end - begin
2572 data['p2.time'] = end - begin
2540 data['p1.renamedfiles'] = len(p1renames)
2573 data['p1.renamedfiles'] = len(p1renames)
2541 data['p2.renamedfiles'] = len(p2renames)
2574 data['p2.renamedfiles'] = len(p2renames)
2542
2575
2543 if dostats:
2576 if dostats:
2544 if p1missing:
2577 if p1missing:
2545 alldata['parentnbrenames'].append(
2578 alldata['parentnbrenames'].append(
2546 (data['p1.renamedfiles'], b.hex(), p1.hex())
2579 (data['p1.renamedfiles'], b.hex(), p1.hex())
2547 )
2580 )
2548 alldata['parenttime'].append(
2581 alldata['parenttime'].append(
2549 (data['p1.time'], b.hex(), p1.hex())
2582 (data['p1.time'], b.hex(), p1.hex())
2550 )
2583 )
2551 if p2missing:
2584 if p2missing:
2552 alldata['parentnbrenames'].append(
2585 alldata['parentnbrenames'].append(
2553 (data['p2.renamedfiles'], b.hex(), p2.hex())
2586 (data['p2.renamedfiles'], b.hex(), p2.hex())
2554 )
2587 )
2555 alldata['parenttime'].append(
2588 alldata['parenttime'].append(
2556 (data['p2.time'], b.hex(), p2.hex())
2589 (data['p2.time'], b.hex(), p2.hex())
2557 )
2590 )
2558 if p1missing or p2missing:
2591 if p1missing or p2missing:
2559 alldata['totalnbrenames'].append(
2592 alldata['totalnbrenames'].append(
2560 (
2593 (
2561 data['nbrenamedfiles'],
2594 data['nbrenamedfiles'],
2562 b.hex(),
2595 b.hex(),
2563 p1.hex(),
2596 p1.hex(),
2564 p2.hex(),
2597 p2.hex(),
2565 )
2598 )
2566 )
2599 )
2567 alldata['totaltime'].append(
2600 alldata['totaltime'].append(
2568 (data['time'], b.hex(), p1.hex(), p2.hex())
2601 (data['time'], b.hex(), p1.hex(), p2.hex())
2569 )
2602 )
2570 fm.startitem()
2603 fm.startitem()
2571 fm.data(**data)
2604 fm.data(**data)
2572 # make node pretty for the human output
2605 # make node pretty for the human output
2573 out = data.copy()
2606 out = data.copy()
2574 out['base'] = fm.hexfunc(b.node())
2607 out['base'] = fm.hexfunc(b.node())
2575 out['p1.node'] = fm.hexfunc(p1.node())
2608 out['p1.node'] = fm.hexfunc(p1.node())
2576 out['p2.node'] = fm.hexfunc(p2.node())
2609 out['p2.node'] = fm.hexfunc(p2.node())
2577 fm.plain(output % out)
2610 fm.plain(output % out)
2578
2611
2579 fm.end()
2612 fm.end()
2580 if dostats:
2613 if dostats:
2581 # use a second formatter because the data are quite different, not sure
2614 # use a second formatter because the data are quite different, not sure
2582 # how it flies with the templater.
2615 # how it flies with the templater.
2583 entries = [
2616 entries = [
2584 ('nbrevs', 'number of revisions covered'),
2617 ('nbrevs', 'number of revisions covered'),
2585 ('nbmissingfiles', 'number of missing files at head'),
2618 ('nbmissingfiles', 'number of missing files at head'),
2586 ]
2619 ]
2587 if dotiming:
2620 if dotiming:
2588 entries.append(
2621 entries.append(
2589 ('parentnbrenames', 'rename from one parent to base')
2622 ('parentnbrenames', 'rename from one parent to base')
2590 )
2623 )
2591 entries.append(('totalnbrenames', 'total number of renames'))
2624 entries.append(('totalnbrenames', 'total number of renames'))
2592 entries.append(('parenttime', 'time for one parent'))
2625 entries.append(('parenttime', 'time for one parent'))
2593 entries.append(('totaltime', 'time for both parents'))
2626 entries.append(('totaltime', 'time for both parents'))
2594 _displaystats(ui, opts, entries, alldata)
2627 _displaystats(ui, opts, entries, alldata)
2595
2628
2596
2629
2597 @command(
2630 @command(
2598 b'perf::helper-pathcopies|perfhelper-pathcopies',
2631 b'perf::helper-pathcopies|perfhelper-pathcopies',
2599 formatteropts
2632 formatteropts
2600 + [
2633 + [
2601 (b'r', b'revs', [], b'restrict search to these revisions'),
2634 (b'r', b'revs', [], b'restrict search to these revisions'),
2602 (b'', b'timing', False, b'provides extra data (costly)'),
2635 (b'', b'timing', False, b'provides extra data (costly)'),
2603 (b'', b'stats', False, b'provides statistic about the measured data'),
2636 (b'', b'stats', False, b'provides statistic about the measured data'),
2604 ],
2637 ],
2605 )
2638 )
2606 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2639 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2607 """find statistic about potential parameters for the `perftracecopies`
2640 """find statistic about potential parameters for the `perftracecopies`
2608
2641
2609 This command find source-destination pair relevant for copytracing testing.
2642 This command find source-destination pair relevant for copytracing testing.
2610 It report value for some of the parameters that impact copy tracing time.
2643 It report value for some of the parameters that impact copy tracing time.
2611
2644
2612 If `--timing` is set, rename detection is run and the associated timing
2645 If `--timing` is set, rename detection is run and the associated timing
2613 will be reported. The extra details come at the cost of slower command
2646 will be reported. The extra details come at the cost of slower command
2614 execution.
2647 execution.
2615
2648
2616 Since the rename detection is only run once, other factors might easily
2649 Since the rename detection is only run once, other factors might easily
2617 affect the precision of the timing. However, it should give a good
2650 affect the precision of the timing. However, it should give a good
2618 approximation of which revision pairs are very costly.
2651 approximation of which revision pairs are very costly.
2619 """
2652 """
2620 opts = _byteskwargs(opts)
2653 opts = _byteskwargs(opts)
2621 fm = ui.formatter(b'perf', opts)
2654 fm = ui.formatter(b'perf', opts)
2622 dotiming = opts[b'timing']
2655 dotiming = opts[b'timing']
2623 dostats = opts[b'stats']
2656 dostats = opts[b'stats']
2624
2657
2625 if dotiming:
2658 if dotiming:
2626 header = '%12s %12s %12s %12s %12s %12s\n'
2659 header = '%12s %12s %12s %12s %12s %12s\n'
2627 output = (
2660 output = (
2628 "%(source)12s %(destination)12s "
2661 "%(source)12s %(destination)12s "
2629 "%(nbrevs)12d %(nbmissingfiles)12d "
2662 "%(nbrevs)12d %(nbmissingfiles)12d "
2630 "%(nbrenamedfiles)12d %(time)18.5f\n"
2663 "%(nbrenamedfiles)12d %(time)18.5f\n"
2631 )
2664 )
2632 header_names = (
2665 header_names = (
2633 "source",
2666 "source",
2634 "destination",
2667 "destination",
2635 "nb-revs",
2668 "nb-revs",
2636 "nb-files",
2669 "nb-files",
2637 "nb-renames",
2670 "nb-renames",
2638 "time",
2671 "time",
2639 )
2672 )
2640 fm.plain(header % header_names)
2673 fm.plain(header % header_names)
2641 else:
2674 else:
2642 header = '%12s %12s %12s %12s\n'
2675 header = '%12s %12s %12s %12s\n'
2643 output = (
2676 output = (
2644 "%(source)12s %(destination)12s "
2677 "%(source)12s %(destination)12s "
2645 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2678 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2646 )
2679 )
2647 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2680 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2648
2681
2649 if not revs:
2682 if not revs:
2650 revs = ['all()']
2683 revs = ['all()']
2651 revs = scmutil.revrange(repo, revs)
2684 revs = scmutil.revrange(repo, revs)
2652
2685
2653 if dostats:
2686 if dostats:
2654 alldata = {
2687 alldata = {
2655 'nbrevs': [],
2688 'nbrevs': [],
2656 'nbmissingfiles': [],
2689 'nbmissingfiles': [],
2657 }
2690 }
2658 if dotiming:
2691 if dotiming:
2659 alldata['nbrenames'] = []
2692 alldata['nbrenames'] = []
2660 alldata['time'] = []
2693 alldata['time'] = []
2661
2694
2662 roi = repo.revs('merge() and %ld', revs)
2695 roi = repo.revs('merge() and %ld', revs)
2663 for r in roi:
2696 for r in roi:
2664 ctx = repo[r]
2697 ctx = repo[r]
2665 p1 = ctx.p1().rev()
2698 p1 = ctx.p1().rev()
2666 p2 = ctx.p2().rev()
2699 p2 = ctx.p2().rev()
2667 bases = repo.changelog._commonancestorsheads(p1, p2)
2700 bases = repo.changelog._commonancestorsheads(p1, p2)
2668 for p in (p1, p2):
2701 for p in (p1, p2):
2669 for b in bases:
2702 for b in bases:
2670 base = repo[b]
2703 base = repo[b]
2671 parent = repo[p]
2704 parent = repo[p]
2672 missing = copies._computeforwardmissing(base, parent)
2705 missing = copies._computeforwardmissing(base, parent)
2673 if not missing:
2706 if not missing:
2674 continue
2707 continue
2675 data = {
2708 data = {
2676 b'source': base.hex(),
2709 b'source': base.hex(),
2677 b'destination': parent.hex(),
2710 b'destination': parent.hex(),
2678 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2711 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2679 b'nbmissingfiles': len(missing),
2712 b'nbmissingfiles': len(missing),
2680 }
2713 }
2681 if dostats:
2714 if dostats:
2682 alldata['nbrevs'].append(
2715 alldata['nbrevs'].append(
2683 (
2716 (
2684 data['nbrevs'],
2717 data['nbrevs'],
2685 base.hex(),
2718 base.hex(),
2686 parent.hex(),
2719 parent.hex(),
2687 )
2720 )
2688 )
2721 )
2689 alldata['nbmissingfiles'].append(
2722 alldata['nbmissingfiles'].append(
2690 (
2723 (
2691 data['nbmissingfiles'],
2724 data['nbmissingfiles'],
2692 base.hex(),
2725 base.hex(),
2693 parent.hex(),
2726 parent.hex(),
2694 )
2727 )
2695 )
2728 )
2696 if dotiming:
2729 if dotiming:
2697 begin = util.timer()
2730 begin = util.timer()
2698 renames = copies.pathcopies(base, parent)
2731 renames = copies.pathcopies(base, parent)
2699 end = util.timer()
2732 end = util.timer()
2700 # not very stable timing since we did only one run
2733 # not very stable timing since we did only one run
2701 data['time'] = end - begin
2734 data['time'] = end - begin
2702 data['nbrenamedfiles'] = len(renames)
2735 data['nbrenamedfiles'] = len(renames)
2703 if dostats:
2736 if dostats:
2704 alldata['time'].append(
2737 alldata['time'].append(
2705 (
2738 (
2706 data['time'],
2739 data['time'],
2707 base.hex(),
2740 base.hex(),
2708 parent.hex(),
2741 parent.hex(),
2709 )
2742 )
2710 )
2743 )
2711 alldata['nbrenames'].append(
2744 alldata['nbrenames'].append(
2712 (
2745 (
2713 data['nbrenamedfiles'],
2746 data['nbrenamedfiles'],
2714 base.hex(),
2747 base.hex(),
2715 parent.hex(),
2748 parent.hex(),
2716 )
2749 )
2717 )
2750 )
2718 fm.startitem()
2751 fm.startitem()
2719 fm.data(**data)
2752 fm.data(**data)
2720 out = data.copy()
2753 out = data.copy()
2721 out['source'] = fm.hexfunc(base.node())
2754 out['source'] = fm.hexfunc(base.node())
2722 out['destination'] = fm.hexfunc(parent.node())
2755 out['destination'] = fm.hexfunc(parent.node())
2723 fm.plain(output % out)
2756 fm.plain(output % out)
2724
2757
2725 fm.end()
2758 fm.end()
2726 if dostats:
2759 if dostats:
2727 entries = [
2760 entries = [
2728 ('nbrevs', 'number of revisions covered'),
2761 ('nbrevs', 'number of revisions covered'),
2729 ('nbmissingfiles', 'number of missing files at head'),
2762 ('nbmissingfiles', 'number of missing files at head'),
2730 ]
2763 ]
2731 if dotiming:
2764 if dotiming:
2732 entries.append(('nbrenames', 'renamed files'))
2765 entries.append(('nbrenames', 'renamed files'))
2733 entries.append(('time', 'time'))
2766 entries.append(('time', 'time'))
2734 _displaystats(ui, opts, entries, alldata)
2767 _displaystats(ui, opts, entries, alldata)
2735
2768
2736
2769
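# Stand-alone sketch of the single timed operation above: rename detection
# between two changectx objects. `copies.pathcopies` and `util.timer` are the
# exact APIs the command uses; the wrapper itself is illustrative only.
def _example_time_pathcopies(base, parent):
    begin = util.timer()
    renames = copies.pathcopies(base, parent)
    return util.timer() - begin, len(renames)
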
2737 @command(b'perf::cca|perfcca', formatteropts)
2770 @command(b'perf::cca|perfcca', formatteropts)
2738 def perfcca(ui, repo, **opts):
2771 def perfcca(ui, repo, **opts):
2739 opts = _byteskwargs(opts)
2772 opts = _byteskwargs(opts)
2740 timer, fm = gettimer(ui, opts)
2773 timer, fm = gettimer(ui, opts)
2741 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2774 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2742 fm.end()
2775 fm.end()
2743
2776
2744
2777
2745 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2778 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2746 def perffncacheload(ui, repo, **opts):
2779 def perffncacheload(ui, repo, **opts):
2747 opts = _byteskwargs(opts)
2780 opts = _byteskwargs(opts)
2748 timer, fm = gettimer(ui, opts)
2781 timer, fm = gettimer(ui, opts)
2749 s = repo.store
2782 s = repo.store
2750
2783
2751 def d():
2784 def d():
2752 s.fncache._load()
2785 s.fncache._load()
2753
2786
2754 timer(d)
2787 timer(d)
2755 fm.end()
2788 fm.end()
2756
2789
2757
2790
2758 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2791 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2759 def perffncachewrite(ui, repo, **opts):
2792 def perffncachewrite(ui, repo, **opts):
2760 opts = _byteskwargs(opts)
2793 opts = _byteskwargs(opts)
2761 timer, fm = gettimer(ui, opts)
2794 timer, fm = gettimer(ui, opts)
2762 s = repo.store
2795 s = repo.store
2763 lock = repo.lock()
2796 lock = repo.lock()
2764 s.fncache._load()
2797 s.fncache._load()
2765 tr = repo.transaction(b'perffncachewrite')
2798 tr = repo.transaction(b'perffncachewrite')
2766 tr.addbackup(b'fncache')
2799 tr.addbackup(b'fncache')
2767
2800
2768 def d():
2801 def d():
2769 s.fncache._dirty = True
2802 s.fncache._dirty = True
2770 s.fncache.write(tr)
2803 s.fncache.write(tr)
2771
2804
2772 timer(d)
2805 timer(d)
2773 tr.close()
2806 tr.close()
2774 lock.release()
2807 lock.release()
2775 fm.end()
2808 fm.end()
2776
2809
2777
2810
2778 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2811 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2779 def perffncacheencode(ui, repo, **opts):
2812 def perffncacheencode(ui, repo, **opts):
2780 opts = _byteskwargs(opts)
2813 opts = _byteskwargs(opts)
2781 timer, fm = gettimer(ui, opts)
2814 timer, fm = gettimer(ui, opts)
2782 s = repo.store
2815 s = repo.store
2783 s.fncache._load()
2816 s.fncache._load()
2784
2817
2785 def d():
2818 def d():
2786 for p in s.fncache.entries:
2819 for p in s.fncache.entries:
2787 s.encode(p)
2820 s.encode(p)
2788
2821
2789 timer(d)
2822 timer(d)
2790 fm.end()
2823 fm.end()
2791
2824
2792
2825
2793 def _bdiffworker(q, blocks, xdiff, ready, done):
2826 def _bdiffworker(q, blocks, xdiff, ready, done):
2794 while not done.is_set():
2827 while not done.is_set():
2795 pair = q.get()
2828 pair = q.get()
2796 while pair is not None:
2829 while pair is not None:
2797 if xdiff:
2830 if xdiff:
2798 mdiff.bdiff.xdiffblocks(*pair)
2831 mdiff.bdiff.xdiffblocks(*pair)
2799 elif blocks:
2832 elif blocks:
2800 mdiff.bdiff.blocks(*pair)
2833 mdiff.bdiff.blocks(*pair)
2801 else:
2834 else:
2802 mdiff.textdiff(*pair)
2835 mdiff.textdiff(*pair)
2803 q.task_done()
2836 q.task_done()
2804 pair = q.get()
2837 pair = q.get()
2805 q.task_done() # for the None one
2838 q.task_done() # for the None one
2806 with ready:
2839 with ready:
2807 ready.wait()
2840 ready.wait()
2808
2841
2809
2842
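# Protocol note for _bdiffworker above: each worker pulls pairs from `q` until
# it sees a None sentinel, acknowledges the sentinel with task_done(), then
# blocks on the `ready` condition. perfbdiff below queues one None per thread
# to fence each timed run, and uses ready.notify_all() to release the workers
# once `done` is set.
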
2810 def _manifestrevision(repo, mnode):
2843 def _manifestrevision(repo, mnode):
2811 ml = repo.manifestlog
2844 ml = repo.manifestlog
2812
2845
2813 if util.safehasattr(ml, b'getstorage'):
2846 if util.safehasattr(ml, b'getstorage'):
2814 store = ml.getstorage(b'')
2847 store = ml.getstorage(b'')
2815 else:
2848 else:
2816 store = ml._revlog
2849 store = ml._revlog
2817
2850
2818 return store.revision(mnode)
2851 return store.revision(mnode)
2819
2852
2820
2853
2821 @command(
2854 @command(
2822 b'perf::bdiff|perfbdiff',
2855 b'perf::bdiff|perfbdiff',
2823 revlogopts
2856 revlogopts
2824 + formatteropts
2857 + formatteropts
2825 + [
2858 + [
2826 (
2859 (
2827 b'',
2860 b'',
2828 b'count',
2861 b'count',
2829 1,
2862 1,
2830 b'number of revisions to test (when using --startrev)',
2863 b'number of revisions to test (when using --startrev)',
2831 ),
2864 ),
2832 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2865 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2833 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2866 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2834 (b'', b'blocks', False, b'test computing diffs into blocks'),
2867 (b'', b'blocks', False, b'test computing diffs into blocks'),
2835 (b'', b'xdiff', False, b'use xdiff algorithm'),
2868 (b'', b'xdiff', False, b'use xdiff algorithm'),
2836 ],
2869 ],
2837 b'-c|-m|FILE REV',
2870 b'-c|-m|FILE REV',
2838 )
2871 )
2839 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2872 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2840 """benchmark a bdiff between revisions
2873 """benchmark a bdiff between revisions
2841
2874
2842 By default, benchmark a bdiff between its delta parent and itself.
2875 By default, benchmark a bdiff between its delta parent and itself.
2843
2876
2844 With ``--count``, benchmark bdiffs between delta parents and self for N
2877 With ``--count``, benchmark bdiffs between delta parents and self for N
2845 revisions starting at the specified revision.
2878 revisions starting at the specified revision.
2846
2879
2847 With ``--alldata``, assume the requested revision is a changeset and
2880 With ``--alldata``, assume the requested revision is a changeset and
2848 measure bdiffs for all changes related to that changeset (manifest
2881 measure bdiffs for all changes related to that changeset (manifest
2849 and filelogs).
2882 and filelogs).
2850 """
2883 """
2851 opts = _byteskwargs(opts)
2884 opts = _byteskwargs(opts)
2852
2885
2853 if opts[b'xdiff'] and not opts[b'blocks']:
2886 if opts[b'xdiff'] and not opts[b'blocks']:
2854 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2887 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2855
2888
2856 if opts[b'alldata']:
2889 if opts[b'alldata']:
2857 opts[b'changelog'] = True
2890 opts[b'changelog'] = True
2858
2891
2859 if opts.get(b'changelog') or opts.get(b'manifest'):
2892 if opts.get(b'changelog') or opts.get(b'manifest'):
2860 file_, rev = None, file_
2893 file_, rev = None, file_
2861 elif rev is None:
2894 elif rev is None:
2862 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2895 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2863
2896
2864 blocks = opts[b'blocks']
2897 blocks = opts[b'blocks']
2865 xdiff = opts[b'xdiff']
2898 xdiff = opts[b'xdiff']
2866 textpairs = []
2899 textpairs = []
2867
2900
2868 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2901 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2869
2902
2870 startrev = r.rev(r.lookup(rev))
2903 startrev = r.rev(r.lookup(rev))
2871 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2904 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2872 if opts[b'alldata']:
2905 if opts[b'alldata']:
2873 # Load revisions associated with changeset.
2906 # Load revisions associated with changeset.
2874 ctx = repo[rev]
2907 ctx = repo[rev]
2875 mtext = _manifestrevision(repo, ctx.manifestnode())
2908 mtext = _manifestrevision(repo, ctx.manifestnode())
2876 for pctx in ctx.parents():
2909 for pctx in ctx.parents():
2877 pman = _manifestrevision(repo, pctx.manifestnode())
2910 pman = _manifestrevision(repo, pctx.manifestnode())
2878 textpairs.append((pman, mtext))
2911 textpairs.append((pman, mtext))
2879
2912
2880 # Load filelog revisions by iterating manifest delta.
2913 # Load filelog revisions by iterating manifest delta.
2881 man = ctx.manifest()
2914 man = ctx.manifest()
2882 pman = ctx.p1().manifest()
2915 pman = ctx.p1().manifest()
2883 for filename, change in pman.diff(man).items():
2916 for filename, change in pman.diff(man).items():
2884 fctx = repo.file(filename)
2917 fctx = repo.file(filename)
2885 f1 = fctx.revision(change[0][0] or -1)
2918 f1 = fctx.revision(change[0][0] or -1)
2886 f2 = fctx.revision(change[1][0] or -1)
2919 f2 = fctx.revision(change[1][0] or -1)
2887 textpairs.append((f1, f2))
2920 textpairs.append((f1, f2))
2888 else:
2921 else:
2889 dp = r.deltaparent(rev)
2922 dp = r.deltaparent(rev)
2890 textpairs.append((r.revision(dp), r.revision(rev)))
2923 textpairs.append((r.revision(dp), r.revision(rev)))
2891
2924
2892 withthreads = threads > 0
2925 withthreads = threads > 0
2893 if not withthreads:
2926 if not withthreads:
2894
2927
2895 def d():
2928 def d():
2896 for pair in textpairs:
2929 for pair in textpairs:
2897 if xdiff:
2930 if xdiff:
2898 mdiff.bdiff.xdiffblocks(*pair)
2931 mdiff.bdiff.xdiffblocks(*pair)
2899 elif blocks:
2932 elif blocks:
2900 mdiff.bdiff.blocks(*pair)
2933 mdiff.bdiff.blocks(*pair)
2901 else:
2934 else:
2902 mdiff.textdiff(*pair)
2935 mdiff.textdiff(*pair)
2903
2936
2904 else:
2937 else:
2905 q = queue()
2938 q = queue()
2906 for i in _xrange(threads):
2939 for i in _xrange(threads):
2907 q.put(None)
2940 q.put(None)
2908 ready = threading.Condition()
2941 ready = threading.Condition()
2909 done = threading.Event()
2942 done = threading.Event()
2910 for i in _xrange(threads):
2943 for i in _xrange(threads):
2911 threading.Thread(
2944 threading.Thread(
2912 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2945 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2913 ).start()
2946 ).start()
2914 q.join()
2947 q.join()
2915
2948
2916 def d():
2949 def d():
2917 for pair in textpairs:
2950 for pair in textpairs:
2918 q.put(pair)
2951 q.put(pair)
2919 for i in _xrange(threads):
2952 for i in _xrange(threads):
2920 q.put(None)
2953 q.put(None)
2921 with ready:
2954 with ready:
2922 ready.notify_all()
2955 ready.notify_all()
2923 q.join()
2956 q.join()
2924
2957
2925 timer, fm = gettimer(ui, opts)
2958 timer, fm = gettimer(ui, opts)
2926 timer(d)
2959 timer(d)
2927 fm.end()
2960 fm.end()
2928
2961
2929 if withthreads:
2962 if withthreads:
2930 done.set()
2963 done.set()
2931 for i in _xrange(threads):
2964 for i in _xrange(threads):
2932 q.put(None)
2965 q.put(None)
2933 with ready:
2966 with ready:
2934 ready.notify_all()
2967 ready.notify_all()
2935
2968
2936
2969
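# Minimal sketch of one unit of work timed above, given two revision texts as
# bytes; mdiff is already imported by this module.
def _example_single_bdiff(old_text, new_text, use_xdiff=False, blocks=False):
    if use_xdiff:
        return mdiff.bdiff.xdiffblocks(old_text, new_text)
    if blocks:
        return mdiff.bdiff.blocks(old_text, new_text)
    return mdiff.textdiff(old_text, new_text)
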
2937 @command(
2970 @command(
2938 b'perf::unbundle',
2971 b'perf::unbundle',
2939 formatteropts,
2972 formatteropts,
2940 b'BUNDLE_FILE',
2973 b'BUNDLE_FILE',
2941 )
2974 )
2942 def perf_unbundle(ui, repo, fname, **opts):
2975 def perf_unbundle(ui, repo, fname, **opts):
2943 """benchmark application of a bundle in a repository.
2976 """benchmark application of a bundle in a repository.
2944
2977
2945 This does not include the final transaction processing"""
2978 This does not include the final transaction processing"""
2946
2979
2947 from mercurial import exchange
2980 from mercurial import exchange
2948 from mercurial import bundle2
2981 from mercurial import bundle2
2949 from mercurial import transaction
2982 from mercurial import transaction
2950
2983
2951 opts = _byteskwargs(opts)
2984 opts = _byteskwargs(opts)
2952
2985
2953 ### some compatibility hotfix
2986 ### some compatibility hotfix
2954 #
2987 #
2955 # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
2988 # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
2956 # critical regression that break transaction rollback for files that are
2989 # critical regression that break transaction rollback for files that are
2957 # de-inlined.
2990 # de-inlined.
2958 method = transaction.transaction._addentry
2991 method = transaction.transaction._addentry
2959 pre_63edc384d3b7 = "data" in getargspec(method).args
2992 pre_63edc384d3b7 = "data" in getargspec(method).args
2960 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
2993 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
2961 # a changeset that is a close descendant of 18415fc918a1, the changeset
2994 # a changeset that is a close descendant of 18415fc918a1, the changeset
2962 # that concludes the fix run for the bug introduced in 63edc384d3b7.
2995 # that concludes the fix run for the bug introduced in 63edc384d3b7.
2963 args = getargspec(error.Abort.__init__).args
2996 args = getargspec(error.Abort.__init__).args
2964 post_18415fc918a1 = "detailed_exit_code" in args
2997 post_18415fc918a1 = "detailed_exit_code" in args
2965
2998
2966 old_max_inline = None
2999 old_max_inline = None
2967 try:
3000 try:
2968 if not (pre_63edc384d3b7 or post_18415fc918a1):
3001 if not (pre_63edc384d3b7 or post_18415fc918a1):
2969 # disable inlining
3002 # disable inlining
2970 old_max_inline = mercurial.revlog._maxinline
3003 old_max_inline = mercurial.revlog._maxinline
2971 # large enough to never happen
3004 # large enough to never happen
2972 mercurial.revlog._maxinline = 2 ** 50
3005 mercurial.revlog._maxinline = 2 ** 50
2973
3006
2974 with repo.lock():
3007 with repo.lock():
2975 bundle = [None, None]
3008 bundle = [None, None]
2976 orig_quiet = repo.ui.quiet
3009 orig_quiet = repo.ui.quiet
2977 try:
3010 try:
2978 repo.ui.quiet = True
3011 repo.ui.quiet = True
2979 with open(fname, mode="rb") as f:
3012 with open(fname, mode="rb") as f:
2980
3013
2981 def noop_report(*args, **kwargs):
3014 def noop_report(*args, **kwargs):
2982 pass
3015 pass
2983
3016
2984 def setup():
3017 def setup():
2985 gen, tr = bundle
3018 gen, tr = bundle
2986 if tr is not None:
3019 if tr is not None:
2987 tr.abort()
3020 tr.abort()
2988 bundle[:] = [None, None]
3021 bundle[:] = [None, None]
2989 f.seek(0)
3022 f.seek(0)
2990 bundle[0] = exchange.readbundle(ui, f, fname)
3023 bundle[0] = exchange.readbundle(ui, f, fname)
2991 bundle[1] = repo.transaction(b'perf::unbundle')
3024 bundle[1] = repo.transaction(b'perf::unbundle')
2992 # silence the transaction
3025 # silence the transaction
2993 bundle[1]._report = noop_report
3026 bundle[1]._report = noop_report
2994
3027
2995 def apply():
3028 def apply():
2996 gen, tr = bundle
3029 gen, tr = bundle
2997 bundle2.applybundle(
3030 bundle2.applybundle(
2998 repo,
3031 repo,
2999 gen,
3032 gen,
3000 tr,
3033 tr,
3001 source=b'perf::unbundle',
3034 source=b'perf::unbundle',
3002 url=fname,
3035 url=fname,
3003 )
3036 )
3004
3037
3005 timer, fm = gettimer(ui, opts)
3038 timer, fm = gettimer(ui, opts)
3006 timer(apply, setup=setup)
3039 timer(apply, setup=setup)
3007 fm.end()
3040 fm.end()
3008 finally:
3041 finally:
3009 repo.ui.quiet = orig_quiet
3042 repo.ui.quiet = orig_quiet
3010 gen, tr = bundle
3043 gen, tr = bundle
3011 if tr is not None:
3044 if tr is not None:
3012 tr.abort()
3045 tr.abort()
3013 finally:
3046 finally:
3014 if old_max_inline is not None:
3047 if old_max_inline is not None:
3015 mercurial.revlog._maxinline = old_max_inline
3048 mercurial.revlog._maxinline = old_max_inline
3016
3049
3017
3050
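# Usage sketch (illustrative): build a bundle from a source repository, then
# time its application; setup() above aborts the previous transaction between
# runs so each iteration starts from the same state.
#
#   $ hg -R src bundle --base null -r tip changes.hg
#   $ hg -R dst perf::unbundle changes.hg
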
3018 @command(
3051 @command(
3019 b'perf::unidiff|perfunidiff',
3052 b'perf::unidiff|perfunidiff',
3020 revlogopts
3053 revlogopts
3021 + formatteropts
3054 + formatteropts
3022 + [
3055 + [
3023 (
3056 (
3024 b'',
3057 b'',
3025 b'count',
3058 b'count',
3026 1,
3059 1,
3027 b'number of revisions to test (when using --startrev)',
3060 b'number of revisions to test (when using --startrev)',
3028 ),
3061 ),
3029 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
3062 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
3030 ],
3063 ],
3031 b'-c|-m|FILE REV',
3064 b'-c|-m|FILE REV',
3032 )
3065 )
3033 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
3066 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
3034 """benchmark a unified diff between revisions
3067 """benchmark a unified diff between revisions
3035
3068
3036 This doesn't include any copy tracing - it's just a unified diff
3069 This doesn't include any copy tracing - it's just a unified diff
3037 of the texts.
3070 of the texts.
3038
3071
3039 By default, benchmark a diff between its delta parent and itself.
3072 By default, benchmark a diff between its delta parent and itself.
3040
3073
3041 With ``--count``, benchmark diffs between delta parents and self for N
3074 With ``--count``, benchmark diffs between delta parents and self for N
3042 revisions starting at the specified revision.
3075 revisions starting at the specified revision.
3043
3076
3044 With ``--alldata``, assume the requested revision is a changeset and
3077 With ``--alldata``, assume the requested revision is a changeset and
3045 measure diffs for all changes related to that changeset (manifest
3078 measure diffs for all changes related to that changeset (manifest
3046 and filelogs).
3079 and filelogs).
3047 """
3080 """
3048 opts = _byteskwargs(opts)
3081 opts = _byteskwargs(opts)
3049 if opts[b'alldata']:
3082 if opts[b'alldata']:
3050 opts[b'changelog'] = True
3083 opts[b'changelog'] = True
3051
3084
3052 if opts.get(b'changelog') or opts.get(b'manifest'):
3085 if opts.get(b'changelog') or opts.get(b'manifest'):
3053 file_, rev = None, file_
3086 file_, rev = None, file_
3054 elif rev is None:
3087 elif rev is None:
3055 raise error.CommandError(b'perfunidiff', b'invalid arguments')
3088 raise error.CommandError(b'perfunidiff', b'invalid arguments')
3056
3089
3057 textpairs = []
3090 textpairs = []
3058
3091
3059 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
3092 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
3060
3093
3061 startrev = r.rev(r.lookup(rev))
3094 startrev = r.rev(r.lookup(rev))
3062 for rev in range(startrev, min(startrev + count, len(r) - 1)):
3095 for rev in range(startrev, min(startrev + count, len(r) - 1)):
3063 if opts[b'alldata']:
3096 if opts[b'alldata']:
3064 # Load revisions associated with changeset.
3097 # Load revisions associated with changeset.
3065 ctx = repo[rev]
3098 ctx = repo[rev]
3066 mtext = _manifestrevision(repo, ctx.manifestnode())
3099 mtext = _manifestrevision(repo, ctx.manifestnode())
3067 for pctx in ctx.parents():
3100 for pctx in ctx.parents():
3068 pman = _manifestrevision(repo, pctx.manifestnode())
3101 pman = _manifestrevision(repo, pctx.manifestnode())
3069 textpairs.append((pman, mtext))
3102 textpairs.append((pman, mtext))
3070
3103
3071 # Load filelog revisions by iterating manifest delta.
3104 # Load filelog revisions by iterating manifest delta.
3072 man = ctx.manifest()
3105 man = ctx.manifest()
3073 pman = ctx.p1().manifest()
3106 pman = ctx.p1().manifest()
3074 for filename, change in pman.diff(man).items():
3107 for filename, change in pman.diff(man).items():
3075 fctx = repo.file(filename)
3108 fctx = repo.file(filename)
3076 f1 = fctx.revision(change[0][0] or -1)
3109 f1 = fctx.revision(change[0][0] or -1)
3077 f2 = fctx.revision(change[1][0] or -1)
3110 f2 = fctx.revision(change[1][0] or -1)
3078 textpairs.append((f1, f2))
3111 textpairs.append((f1, f2))
3079 else:
3112 else:
3080 dp = r.deltaparent(rev)
3113 dp = r.deltaparent(rev)
3081 textpairs.append((r.revision(dp), r.revision(rev)))
3114 textpairs.append((r.revision(dp), r.revision(rev)))
3082
3115
3083 def d():
3116 def d():
3084 for left, right in textpairs:
3117 for left, right in textpairs:
3085 # The date strings don't matter, so we pass empty strings.
3118 # The date strings don't matter, so we pass empty strings.
3086 headerlines, hunks = mdiff.unidiff(
3119 headerlines, hunks = mdiff.unidiff(
3087 left, b'', right, b'', b'left', b'right', binary=False
3120 left, b'', right, b'', b'left', b'right', binary=False
3088 )
3121 )
3089 # consume iterators in roughly the way patch.py does
3122 # consume iterators in roughly the way patch.py does
3090 b'\n'.join(headerlines)
3123 b'\n'.join(headerlines)
3091 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
3124 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
3092
3125
3093 timer, fm = gettimer(ui, opts)
3126 timer, fm = gettimer(ui, opts)
3094 timer(d)
3127 timer(d)
3095 fm.end()
3128 fm.end()
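
# A minimal standalone sketch of the consumption pattern above:
# generator-based diff APIs are lazy, so a benchmark must drain their
# iterators or it measures almost nothing. The stdlib's difflib stands
# in for Mercurial's internal mdiff here; the helper name is made up.
import difflib
import time

def time_unidiff(left, right, runs=100):
    lhs = left.splitlines(keepends=True)
    rhs = right.splitlines(keepends=True)
    start = time.perf_counter()
    for _ in range(runs):
        # joining forces the lazy diff to actually be computed
        ''.join(difflib.unified_diff(lhs, rhs, 'left', 'right'))
    return (time.perf_counter() - start) / runs

print(time_unidiff('a\nb\nc\n', 'a\nB\nc\n'))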
3096
3129
3097
3130
3098 @command(b'perf::diffwd|perfdiffwd', formatteropts)
3131 @command(b'perf::diffwd|perfdiffwd', formatteropts)
3099 def perfdiffwd(ui, repo, **opts):
3132 def perfdiffwd(ui, repo, **opts):
3100 """Profile diff of working directory changes"""
3133 """Profile diff of working directory changes"""
3101 opts = _byteskwargs(opts)
3134 opts = _byteskwargs(opts)
3102 timer, fm = gettimer(ui, opts)
3135 timer, fm = gettimer(ui, opts)
3103 options = {
3136 options = {
3104 'w': 'ignore_all_space',
3137 'w': 'ignore_all_space',
3105 'b': 'ignore_space_change',
3138 'b': 'ignore_space_change',
3106 'B': 'ignore_blank_lines',
3139 'B': 'ignore_blank_lines',
3107 }
3140 }
3108
3141
3109 for diffopt in ('', 'w', 'b', 'B', 'wB'):
3142 for diffopt in ('', 'w', 'b', 'B', 'wB'):
3110 opts = {options[c]: b'1' for c in diffopt}
3143 opts = {options[c]: b'1' for c in diffopt}
3111
3144
3112 def d():
3145 def d():
3113 ui.pushbuffer()
3146 ui.pushbuffer()
3114 commands.diff(ui, repo, **opts)
3147 commands.diff(ui, repo, **opts)
3115 ui.popbuffer()
3148 ui.popbuffer()
3116
3149
3117 diffopt = diffopt.encode('ascii')
3150 diffopt = diffopt.encode('ascii')
3118 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
3151 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
3119 timer(d, title=title)
3152 timer(d, title=title)
3120 fm.end()
3153 fm.end()
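
# perfdiffwd buffers ui output so the cost of printing the diff stays
# out of the measurement. A rough stdlib analogue of that idea (a
# sketch only; redirect_stdout is not what Mercurial itself uses):
import contextlib
import io
import time

def time_silenced(fn, runs=10):
    start = time.perf_counter()
    for _ in range(runs):
        with contextlib.redirect_stdout(io.StringIO()):
            fn()  # anything fn prints is captured, not written
    return (time.perf_counter() - start) / runs

print(time_silenced(lambda: print('noisy work')))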
3121
3154
3122
3155
3123 @command(
3156 @command(
3124 b'perf::revlogindex|perfrevlogindex',
3157 b'perf::revlogindex|perfrevlogindex',
3125 revlogopts + formatteropts,
3158 revlogopts + formatteropts,
3126 b'-c|-m|FILE',
3159 b'-c|-m|FILE',
3127 )
3160 )
3128 def perfrevlogindex(ui, repo, file_=None, **opts):
3161 def perfrevlogindex(ui, repo, file_=None, **opts):
3129 """Benchmark operations against a revlog index.
3162 """Benchmark operations against a revlog index.
3130
3163
3131 This tests constructing a revlog instance, reading index data,
3164 This tests constructing a revlog instance, reading index data,
3132 parsing index data, and performing various operations related to
3165 parsing index data, and performing various operations related to
3133 index data.
3166 index data.
3134 """
3167 """
3135
3168
3136 opts = _byteskwargs(opts)
3169 opts = _byteskwargs(opts)
3137
3170
3138 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
3171 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
3139
3172
3140 opener = getattr(rl, 'opener') # trick linter
3173 opener = getattr(rl, 'opener') # trick linter
3141 # compat with hg <= 5.8
3174 # compat with hg <= 5.8
3142 radix = getattr(rl, 'radix', None)
3175 radix = getattr(rl, 'radix', None)
3143 indexfile = getattr(rl, '_indexfile', None)
3176 indexfile = getattr(rl, '_indexfile', None)
3144 if indexfile is None:
3177 if indexfile is None:
3145 # compatibility with <= hg-5.8
3178 # compatibility with <= hg-5.8
3146 indexfile = getattr(rl, 'indexfile')
3179 indexfile = getattr(rl, 'indexfile')
3147 data = opener.read(indexfile)
3180 data = opener.read(indexfile)
3148
3181
3149 header = struct.unpack(b'>I', data[0:4])[0]
3182 header = struct.unpack(b'>I', data[0:4])[0]
3150 version = header & 0xFFFF
3183 version = header & 0xFFFF
3151 if version == 1:
3184 if version == 1:
3152 inline = header & (1 << 16)
3185 inline = header & (1 << 16)
3153 else:
3186 else:
3154 raise error.Abort(b'unsupported revlog version: %d' % version)
3187 raise error.Abort(b'unsupported revlog version: %d' % version)
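
# The 4-byte big-endian header decodes as: low 16 bits = revlog version,
# bit 16 = inline-data flag. A self-contained sketch of that decoding,
# using synthetic bytes rather than a real index file:
import struct

sample = struct.pack(b'>I', (1 << 16) | 1)  # inline revlog v1 header
hdr = struct.unpack(b'>I', sample[0:4])[0]
assert hdr & 0xFFFF == 1        # version field
assert bool(hdr & (1 << 16))    # data stored inline with the index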
3155
3188
3156 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
3189 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
3157 if parse_index_v1 is None:
3190 if parse_index_v1 is None:
3158 parse_index_v1 = mercurial.revlog.revlogio().parseindex
3191 parse_index_v1 = mercurial.revlog.revlogio().parseindex
3159
3192
3160 rllen = len(rl)
3193 rllen = len(rl)
3161
3194
3162 node0 = rl.node(0)
3195 node0 = rl.node(0)
3163 node25 = rl.node(rllen // 4)
3196 node25 = rl.node(rllen // 4)
3164 node50 = rl.node(rllen // 2)
3197 node50 = rl.node(rllen // 2)
3165 node75 = rl.node(rllen // 4 * 3)
3198 node75 = rl.node(rllen // 4 * 3)
3166 node100 = rl.node(rllen - 1)
3199 node100 = rl.node(rllen - 1)
3167
3200
3168 allrevs = range(rllen)
3201 allrevs = range(rllen)
3169 allrevsrev = list(reversed(allrevs))
3202 allrevsrev = list(reversed(allrevs))
3170 allnodes = [rl.node(rev) for rev in range(rllen)]
3203 allnodes = [rl.node(rev) for rev in range(rllen)]
3171 allnodesrev = list(reversed(allnodes))
3204 allnodesrev = list(reversed(allnodes))
3172
3205
3173 def constructor():
3206 def constructor():
3174 if radix is not None:
3207 if radix is not None:
3175 revlog(opener, radix=radix)
3208 revlog(opener, radix=radix)
3176 else:
3209 else:
3177 # hg <= 5.8
3210 # hg <= 5.8
3178 revlog(opener, indexfile=indexfile)
3211 revlog(opener, indexfile=indexfile)
3179
3212
3180 def read():
3213 def read():
3181 with opener(indexfile) as fh:
3214 with opener(indexfile) as fh:
3182 fh.read()
3215 fh.read()
3183
3216
3184 def parseindex():
3217 def parseindex():
3185 parse_index_v1(data, inline)
3218 parse_index_v1(data, inline)
3186
3219
3187 def getentry(revornode):
3220 def getentry(revornode):
3188 index = parse_index_v1(data, inline)[0]
3221 index = parse_index_v1(data, inline)[0]
3189 index[revornode]
3222 index[revornode]
3190
3223
3191 def getentries(revs, count=1):
3224 def getentries(revs, count=1):
3192 index = parse_index_v1(data, inline)[0]
3225 index = parse_index_v1(data, inline)[0]
3193
3226
3194 for i in range(count):
3227 for i in range(count):
3195 for rev in revs:
3228 for rev in revs:
3196 index[rev]
3229 index[rev]
3197
3230
3198 def resolvenode(node):
3231 def resolvenode(node):
3199 index = parse_index_v1(data, inline)[0]
3232 index = parse_index_v1(data, inline)[0]
3200 rev = getattr(index, 'rev', None)
3233 rev = getattr(index, 'rev', None)
3201 if rev is None:
3234 if rev is None:
3202 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3235 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3203 # This only works for the C code.
3236 # This only works for the C code.
3204 if nodemap is None:
3237 if nodemap is None:
3205 return
3238 return
3206 rev = nodemap.__getitem__
3239 rev = nodemap.__getitem__
3207
3240
3208 try:
3241 try:
3209 rev(node)
3242 rev(node)
3210 except error.RevlogError:
3243 except error.RevlogError:
3211 pass
3244 pass
3212
3245
3213 def resolvenodes(nodes, count=1):
3246 def resolvenodes(nodes, count=1):
3214 index = parse_index_v1(data, inline)[0]
3247 index = parse_index_v1(data, inline)[0]
3215 rev = getattr(index, 'rev', None)
3248 rev = getattr(index, 'rev', None)
3216 if rev is None:
3249 if rev is None:
3217 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3250 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3218 # This only works for the C code.
3251 # This only works for the C code.
3219 if nodemap is None:
3252 if nodemap is None:
3220 return
3253 return
3221 rev = nodemap.__getitem__
3254 rev = nodemap.__getitem__
3222
3255
3223 for i in range(count):
3256 for i in range(count):
3224 for node in nodes:
3257 for node in nodes:
3225 try:
3258 try:
3226 rev(node)
3259 rev(node)
3227 except error.RevlogError:
3260 except error.RevlogError:
3228 pass
3261 pass
3229
3262
3230 benches = [
3263 benches = [
3231 (constructor, b'revlog constructor'),
3264 (constructor, b'revlog constructor'),
3232 (read, b'read'),
3265 (read, b'read'),
3233 (parseindex, b'create index object'),
3266 (parseindex, b'create index object'),
3234 (lambda: getentry(0), b'retrieve index entry for rev 0'),
3267 (lambda: getentry(0), b'retrieve index entry for rev 0'),
3235 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
3268 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
3236 (lambda: resolvenode(node0), b'look up node at rev 0'),
3269 (lambda: resolvenode(node0), b'look up node at rev 0'),
3237 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
3270 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
3238 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
3271 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
3239 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
3272 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
3240 (lambda: resolvenode(node100), b'look up node at tip'),
3273 (lambda: resolvenode(node100), b'look up node at tip'),
3241 # 2x variation is to measure caching impact.
3274 # 2x variation is to measure caching impact.
3242 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
3275 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
3243 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
3276 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
3244 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
3277 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
3245 (
3278 (
3246 lambda: resolvenodes(allnodesrev, 2),
3279 lambda: resolvenodes(allnodesrev, 2),
3247 b'look up all nodes 2x (reverse)',
3280 b'look up all nodes 2x (reverse)',
3248 ),
3281 ),
3249 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
3282 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
3250 (
3283 (
3251 lambda: getentries(allrevs, 2),
3284 lambda: getentries(allrevs, 2),
3252 b'retrieve all index entries 2x (forward)',
3285 b'retrieve all index entries 2x (forward)',
3253 ),
3286 ),
3254 (
3287 (
3255 lambda: getentries(allrevsrev),
3288 lambda: getentries(allrevsrev),
3256 b'retrieve all index entries (reverse)',
3289 b'retrieve all index entries (reverse)',
3257 ),
3290 ),
3258 (
3291 (
3259 lambda: getentries(allrevsrev, 2),
3292 lambda: getentries(allrevsrev, 2),
3260 b'retrieve all index entries 2x (reverse)',
3293 b'retrieve all index entries 2x (reverse)',
3261 ),
3294 ),
3262 ]
3295 ]
3263
3296
3264 for fn, title in benches:
3297 for fn, title in benches:
3265 timer, fm = gettimer(ui, opts)
3298 timer, fm = gettimer(ui, opts)
3266 timer(fn, title=title)
3299 timer(fn, title=title)
3267 fm.end()
3300 fm.end()
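
# The (function, title) table above is a handy benchmarking idiom: build
# labelled closures first, then push them all through one timing loop.
# A generic sketch of the same shape, with timeit standing in for this
# extension's gettimer() machinery:
import timeit

def run_benches(benches, number=1000):
    for fn, title in benches:
        best = min(timeit.repeat(fn, repeat=3, number=number))
        print('%-16s %.6f s per %d runs' % (title, best, number))

data = list(range(10000))
run_benches([
    (lambda: data[0], 'first element'),
    (lambda: data.index(9999), 'linear search'),
])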
3268
3301
3269
3302
3270 @command(
3303 @command(
3271 b'perf::revlogrevisions|perfrevlogrevisions',
3304 b'perf::revlogrevisions|perfrevlogrevisions',
3272 revlogopts
3305 revlogopts
3273 + formatteropts
3306 + formatteropts
3274 + [
3307 + [
3275 (b'd', b'dist', 100, b'distance between the revisions'),
3308 (b'd', b'dist', 100, b'distance between the revisions'),
3276 (b's', b'startrev', 0, b'revision to start reading at'),
3309 (b's', b'startrev', 0, b'revision to start reading at'),
3277 (b'', b'reverse', False, b'read in reverse'),
3310 (b'', b'reverse', False, b'read in reverse'),
3278 ],
3311 ],
3279 b'-c|-m|FILE',
3312 b'-c|-m|FILE',
3280 )
3313 )
3281 def perfrevlogrevisions(
3314 def perfrevlogrevisions(
3282 ui, repo, file_=None, startrev=0, reverse=False, **opts
3315 ui, repo, file_=None, startrev=0, reverse=False, **opts
3283 ):
3316 ):
3284 """Benchmark reading a series of revisions from a revlog.
3317 """Benchmark reading a series of revisions from a revlog.
3285
3318
3286 By default, we read every ``-d/--dist`` revision from 0 to tip of
3319 By default, we read every ``-d/--dist`` revision from 0 to tip of
3287 the specified revlog.
3320 the specified revlog.
3288
3321
3289 The start revision can be defined via ``-s/--startrev``.
3322 The start revision can be defined via ``-s/--startrev``.
3290 """
3323 """
3291 opts = _byteskwargs(opts)
3324 opts = _byteskwargs(opts)
3292
3325
3293 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3326 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3294 rllen = getlen(ui)(rl)
3327 rllen = getlen(ui)(rl)
3295
3328
3296 if startrev < 0:
3329 if startrev < 0:
3297 startrev = rllen + startrev
3330 startrev = rllen + startrev
3298
3331
3299 def d():
3332 def d():
3300 rl.clearcaches()
3333 rl.clearcaches()
3301
3334
3302 beginrev = startrev
3335 beginrev = startrev
3303 endrev = rllen
3336 endrev = rllen
3304 dist = opts[b'dist']
3337 dist = opts[b'dist']
3305
3338
3306 if reverse:
3339 if reverse:
3307 beginrev, endrev = endrev - 1, beginrev - 1
3340 beginrev, endrev = endrev - 1, beginrev - 1
3308 dist = -1 * dist
3341 dist = -1 * dist
3309
3342
3310 for x in _xrange(beginrev, endrev, dist):
3343 for x in _xrange(beginrev, endrev, dist):
3311 # Old revisions don't support passing int.
3344 # Old revisions don't support passing int.
3312 n = rl.node(x)
3345 n = rl.node(x)
3313 rl.revision(n)
3346 rl.revision(n)
3314
3347
3315 timer, fm = gettimer(ui, opts)
3348 timer, fm = gettimer(ui, opts)
3316 timer(d)
3349 timer(d)
3317 fm.end()
3350 fm.end()
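
# The reverse branch above flips both endpoints and the stride so a
# single range expression serves both directions. That flip, isolated
# into a runnable sketch:
def revs_to_read(beginrev, endrev, dist, reverse=False):
    if reverse:
        beginrev, endrev = endrev - 1, beginrev - 1
        dist = -1 * dist
    return list(range(beginrev, endrev, dist))

assert revs_to_read(0, 10, 3) == [0, 3, 6, 9]
assert revs_to_read(0, 10, 3, reverse=True) == [9, 6, 3, 0]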
3318
3351
3319
3352
3320 @command(
3353 @command(
3321 b'perf::revlogwrite|perfrevlogwrite',
3354 b'perf::revlogwrite|perfrevlogwrite',
3322 revlogopts
3355 revlogopts
3323 + formatteropts
3356 + formatteropts
3324 + [
3357 + [
3325 (b's', b'startrev', 1000, b'revision to start writing at'),
3358 (b's', b'startrev', 1000, b'revision to start writing at'),
3326 (b'', b'stoprev', -1, b'last revision to write'),
3359 (b'', b'stoprev', -1, b'last revision to write'),
3327 (b'', b'count', 3, b'number of passes to perform'),
3360 (b'', b'count', 3, b'number of passes to perform'),
3328 (b'', b'details', False, b'print timing for every revision tested'),
3361 (b'', b'details', False, b'print timing for every revision tested'),
3329 (b'', b'source', b'full', b'the kind of data fed to the revlog'),
3362 (b'', b'source', b'full', b'the kind of data fed to the revlog'),
3330 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3363 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3331 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3364 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3332 ],
3365 ],
3333 b'-c|-m|FILE',
3366 b'-c|-m|FILE',
3334 )
3367 )
3335 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3368 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3336 """Benchmark writing a series of revisions to a revlog.
3369 """Benchmark writing a series of revisions to a revlog.
3337
3370
3338 Possible source values are:
3371 Possible source values are:
3339 * `full`: add from a full text (default).
3372 * `full`: add from a full text (default).
3340 * `parent-1`: add from a delta to the first parent
3373 * `parent-1`: add from a delta to the first parent
3341 * `parent-2`: add from a delta to the second parent if it exists
3374 * `parent-2`: add from a delta to the second parent if it exists
3342 (use a delta from the first parent otherwise)
3375 (use a delta from the first parent otherwise)
3343 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3376 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3344 * `storage`: add from the existing precomputed deltas
3377 * `storage`: add from the existing precomputed deltas
3345
3378
3346 Note: This command measures performance in a custom way. As a result,
3379 Note: This command measures performance in a custom way. As a result,
3347 some of the global configuration of the 'perf' command does not
3380 some of the global configuration of the 'perf' command does not
3348 apply to it:
3381 apply to it:
3349
3382
3350 * ``pre-run``: disabled
3383 * ``pre-run``: disabled
3351
3384
3352 * ``profile-benchmark``: disabled
3385 * ``profile-benchmark``: disabled
3353
3386
3354 * ``run-limits``: disabled, use --count instead
3387 * ``run-limits``: disabled, use --count instead
3355 """
3388 """
3356 opts = _byteskwargs(opts)
3389 opts = _byteskwargs(opts)
3357
3390
3358 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3391 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3359 rllen = getlen(ui)(rl)
3392 rllen = getlen(ui)(rl)
3360 if startrev < 0:
3393 if startrev < 0:
3361 startrev = rllen + startrev
3394 startrev = rllen + startrev
3362 if stoprev < 0:
3395 if stoprev < 0:
3363 stoprev = rllen + stoprev
3396 stoprev = rllen + stoprev
3364
3397
3365 lazydeltabase = opts['lazydeltabase']
3398 lazydeltabase = opts['lazydeltabase']
3366 source = opts['source']
3399 source = opts['source']
3367 clearcaches = opts['clear_caches']
3400 clearcaches = opts['clear_caches']
3368 validsource = (
3401 validsource = (
3369 b'full',
3402 b'full',
3370 b'parent-1',
3403 b'parent-1',
3371 b'parent-2',
3404 b'parent-2',
3372 b'parent-smallest',
3405 b'parent-smallest',
3373 b'storage',
3406 b'storage',
3374 )
3407 )
3375 if source not in validsource:
3408 if source not in validsource:
3376 raise error.Abort('invalid source type: %s' % source)
3409 raise error.Abort('invalid source type: %s' % source)
3377
3410
3378 ### actually gather results
3411 ### actually gather results
3379 count = opts['count']
3412 count = opts['count']
3380 if count <= 0:
3413 if count <= 0:
3381 raise error.Abort('invalid run count: %d' % count)
3414 raise error.Abort('invalid run count: %d' % count)
3382 allresults = []
3415 allresults = []
3383 for c in range(count):
3416 for c in range(count):
3384 timing = _timeonewrite(
3417 timing = _timeonewrite(
3385 ui,
3418 ui,
3386 rl,
3419 rl,
3387 source,
3420 source,
3388 startrev,
3421 startrev,
3389 stoprev,
3422 stoprev,
3390 c + 1,
3423 c + 1,
3391 lazydeltabase=lazydeltabase,
3424 lazydeltabase=lazydeltabase,
3392 clearcaches=clearcaches,
3425 clearcaches=clearcaches,
3393 )
3426 )
3394 allresults.append(timing)
3427 allresults.append(timing)
3395
3428
3396 ### consolidate the results in a single list
3429 ### consolidate the results in a single list
3397 results = []
3430 results = []
3398 for idx, (rev, t) in enumerate(allresults[0]):
3431 for idx, (rev, t) in enumerate(allresults[0]):
3399 ts = [t]
3432 ts = [t]
3400 for other in allresults[1:]:
3433 for other in allresults[1:]:
3401 orev, ot = other[idx]
3434 orev, ot = other[idx]
3402 assert orev == rev
3435 assert orev == rev
3403 ts.append(ot)
3436 ts.append(ot)
3404 results.append((rev, ts))
3437 results.append((rev, ts))
3405 resultcount = len(results)
3438 resultcount = len(results)
3406
3439
3407 ### Compute and display relevant statistics
3440 ### Compute and display relevant statistics
3408
3441
3409 # get a formatter
3442 # get a formatter
3410 fm = ui.formatter(b'perf', opts)
3443 fm = ui.formatter(b'perf', opts)
3411 displayall = ui.configbool(b"perf", b"all-timing", False)
3444 displayall = ui.configbool(b"perf", b"all-timing", False)
3412
3445
3413 # print individual details if requested
3446 # print individual details if requested
3414 if opts['details']:
3447 if opts['details']:
3415 for idx, item in enumerate(results, 1):
3448 for idx, item in enumerate(results, 1):
3416 rev, data = item
3449 rev, data = item
3417 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3450 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3418 formatone(fm, data, title=title, displayall=displayall)
3451 formatone(fm, data, title=title, displayall=displayall)
3419
3452
3420 # sort results by median time
3453 # sort results by median time
3421 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3454 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3422 # list of (name, index) to display
3455 # list of (name, index) to display
3423 relevants = [
3456 relevants = [
3424 ("min", 0),
3457 ("min", 0),
3425 ("10%", resultcount * 10 // 100),
3458 ("10%", resultcount * 10 // 100),
3426 ("25%", resultcount * 25 // 100),
3459 ("25%", resultcount * 25 // 100),
3427 ("50%", resultcount * 50 // 100),
3460 ("50%", resultcount * 50 // 100),
3428 ("75%", resultcount * 75 // 100),
3461 ("75%", resultcount * 75 // 100),
3429 ("90%", resultcount * 90 // 100),
3462 ("90%", resultcount * 90 // 100),
3430 ("95%", resultcount * 95 // 100),
3463 ("95%", resultcount * 95 // 100),
3431 ("99%", resultcount * 99 // 100),
3464 ("99%", resultcount * 99 // 100),
3432 ("99.9%", resultcount * 999 // 1000),
3465 ("99.9%", resultcount * 999 // 1000),
3433 ("99.99%", resultcount * 9999 // 10000),
3466 ("99.99%", resultcount * 9999 // 10000),
3434 ("99.999%", resultcount * 99999 // 100000),
3467 ("99.999%", resultcount * 99999 // 100000),
3435 ("max", -1),
3468 ("max", -1),
3436 ]
3469 ]
3437 if not ui.quiet:
3470 if not ui.quiet:
3438 for name, idx in relevants:
3471 for name, idx in relevants:
3439 data = results[idx]
3472 data = results[idx]
3440 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3473 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3441 formatone(fm, data[1], title=title, displayall=displayall)
3474 formatone(fm, data[1], title=title, displayall=displayall)
3442
3475
3443 # XXX summing that many floats will not be very precise; we ignore this
3476 # XXX summing that many floats will not be very precise; we ignore this
3444 # fact for now
3477 # fact for now
3445 totaltime = []
3478 totaltime = []
3446 for item in allresults:
3479 for item in allresults:
3447 totaltime.append(
3480 totaltime.append(
3448 (
3481 (
3449 sum(x[1][0] for x in item),
3482 sum(x[1][0] for x in item),
3450 sum(x[1][1] for x in item),
3483 sum(x[1][1] for x in item),
3451 sum(x[1][2] for x in item),
3484 sum(x[1][2] for x in item),
3452 )
3485 )
3453 )
3486 )
3454 formatone(
3487 formatone(
3455 fm,
3488 fm,
3456 totaltime,
3489 totaltime,
3457 title="total time (%d revs)" % resultcount,
3490 title="total time (%d revs)" % resultcount,
3458 displayall=displayall,
3491 displayall=displayall,
3459 )
3492 )
3460 fm.end()
3493 fm.end()
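
# The `relevants` table above selects percentile rows from the
# median-sorted results with integer index arithmetic. The same idea as
# a standalone helper (a sketch; the real command formats through `fm`):
def percentile_rows(sorted_results, points=(10, 25, 50, 75, 90, 99)):
    n = len(sorted_results)
    rows = [('min', sorted_results[0])]
    rows += [('%d%%' % p, sorted_results[n * p // 100]) for p in points]
    rows.append(('max', sorted_results[-1]))
    return rows

for name, value in percentile_rows(sorted(range(1, 101))):
    print(name, value)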
3461
3494
3462
3495
3463 class _faketr:
3496 class _faketr:
3464 def add(s, x, y, z=None):
3497 def add(s, x, y, z=None):
3465 return None
3498 return None
3466
3499
3467
3500
3468 def _timeonewrite(
3501 def _timeonewrite(
3469 ui,
3502 ui,
3470 orig,
3503 orig,
3471 source,
3504 source,
3472 startrev,
3505 startrev,
3473 stoprev,
3506 stoprev,
3474 runidx=None,
3507 runidx=None,
3475 lazydeltabase=True,
3508 lazydeltabase=True,
3476 clearcaches=True,
3509 clearcaches=True,
3477 ):
3510 ):
3478 timings = []
3511 timings = []
3479 tr = _faketr()
3512 tr = _faketr()
3480 with _temprevlog(ui, orig, startrev) as dest:
3513 with _temprevlog(ui, orig, startrev) as dest:
3481 dest._lazydeltabase = lazydeltabase
3514 dest._lazydeltabase = lazydeltabase
3482 revs = list(orig.revs(startrev, stoprev))
3515 revs = list(orig.revs(startrev, stoprev))
3483 total = len(revs)
3516 total = len(revs)
3484 topic = 'adding'
3517 topic = 'adding'
3485 if runidx is not None:
3518 if runidx is not None:
3486 topic += ' (run #%d)' % runidx
3519 topic += ' (run #%d)' % runidx
3487 # Support both old and new progress API
3520 # Support both old and new progress API
3488 if util.safehasattr(ui, 'makeprogress'):
3521 if util.safehasattr(ui, 'makeprogress'):
3489 progress = ui.makeprogress(topic, unit='revs', total=total)
3522 progress = ui.makeprogress(topic, unit='revs', total=total)
3490
3523
3491 def updateprogress(pos):
3524 def updateprogress(pos):
3492 progress.update(pos)
3525 progress.update(pos)
3493
3526
3494 def completeprogress():
3527 def completeprogress():
3495 progress.complete()
3528 progress.complete()
3496
3529
3497 else:
3530 else:
3498
3531
3499 def updateprogress(pos):
3532 def updateprogress(pos):
3500 ui.progress(topic, pos, unit='revs', total=total)
3533 ui.progress(topic, pos, unit='revs', total=total)
3501
3534
3502 def completeprogress():
3535 def completeprogress():
3503 ui.progress(topic, None, unit='revs', total=total)
3536 ui.progress(topic, None, unit='revs', total=total)
3504
3537
3505 for idx, rev in enumerate(revs):
3538 for idx, rev in enumerate(revs):
3506 updateprogress(idx)
3539 updateprogress(idx)
3507 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3540 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3508 if clearcaches:
3541 if clearcaches:
3509 dest.index.clearcaches()
3542 dest.index.clearcaches()
3510 dest.clearcaches()
3543 dest.clearcaches()
3511 with timeone() as r:
3544 with timeone() as r:
3512 dest.addrawrevision(*addargs, **addkwargs)
3545 dest.addrawrevision(*addargs, **addkwargs)
3513 timings.append((rev, r[0]))
3546 timings.append((rev, r[0]))
3514 updateprogress(total)
3547 updateprogress(total)
3515 completeprogress()
3548 completeprogress()
3516 return timings
3549 return timings
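
# The progress handling above probes once for the modern
# `ui.makeprogress` API and binds two small callables, falling back to
# the older `ui.progress` call otherwise. The probe-and-bind shape in
# isolation (FakeUI is a made-up stand-in, not a Mercurial class):
class FakeUI:
    def makeprogress(self, topic, unit=None, total=None):
        class Progress:
            def update(self, pos):
                print('%s: %d' % (topic, pos))
            def complete(self):
                print('%s: done' % topic)
        return Progress()

ui = FakeUI()
if hasattr(ui, 'makeprogress'):
    progress = ui.makeprogress('adding', unit='revs', total=3)
    update, complete = progress.update, progress.complete
else:
    update = lambda pos: None   # old hg would call ui.progress(...)
    complete = lambda: None

for i in range(3):
    update(i)
complete()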
3517
3550
3518
3551
3519 def _getrevisionseed(orig, rev, tr, source):
3552 def _getrevisionseed(orig, rev, tr, source):
3520 from mercurial.node import nullid
3553 from mercurial.node import nullid
3521
3554
3522 linkrev = orig.linkrev(rev)
3555 linkrev = orig.linkrev(rev)
3523 node = orig.node(rev)
3556 node = orig.node(rev)
3524 p1, p2 = orig.parents(node)
3557 p1, p2 = orig.parents(node)
3525 flags = orig.flags(rev)
3558 flags = orig.flags(rev)
3526 cachedelta = None
3559 cachedelta = None
3527 text = None
3560 text = None
3528
3561
3529 if source == b'full':
3562 if source == b'full':
3530 text = orig.revision(rev)
3563 text = orig.revision(rev)
3531 elif source == b'parent-1':
3564 elif source == b'parent-1':
3532 baserev = orig.rev(p1)
3565 baserev = orig.rev(p1)
3533 cachedelta = (baserev, orig.revdiff(p1, rev))
3566 cachedelta = (baserev, orig.revdiff(p1, rev))
3534 elif source == b'parent-2':
3567 elif source == b'parent-2':
3535 parent = p2
3568 parent = p2
3536 if p2 == nullid:
3569 if p2 == nullid:
3537 parent = p1
3570 parent = p1
3538 baserev = orig.rev(parent)
3571 baserev = orig.rev(parent)
3539 cachedelta = (baserev, orig.revdiff(parent, rev))
3572 cachedelta = (baserev, orig.revdiff(parent, rev))
3540 elif source == b'parent-smallest':
3573 elif source == b'parent-smallest':
3541 p1diff = orig.revdiff(p1, rev)
3574 p1diff = orig.revdiff(p1, rev)
3542 parent = p1
3575 parent = p1
3543 diff = p1diff
3576 diff = p1diff
3544 if p2 != nullid:
3577 if p2 != nullid:
3545 p2diff = orig.revdiff(p2, rev)
3578 p2diff = orig.revdiff(p2, rev)
3546 if len(p1diff) > len(p2diff):
3579 if len(p1diff) > len(p2diff):
3547 parent = p2
3580 parent = p2
3548 diff = p2diff
3581 diff = p2diff
3549 baserev = orig.rev(parent)
3582 baserev = orig.rev(parent)
3550 cachedelta = (baserev, diff)
3583 cachedelta = (baserev, diff)
3551 elif source == b'storage':
3584 elif source == b'storage':
3552 baserev = orig.deltaparent(rev)
3585 baserev = orig.deltaparent(rev)
3553 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3586 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3554
3587
3555 return (
3588 return (
3556 (text, tr, linkrev, p1, p2),
3589 (text, tr, linkrev, p1, p2),
3557 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3590 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3558 )
3591 )
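
# `parent-smallest` computes both parent deltas and keeps the shorter
# one. The selection step reduced to its core, with byte strings
# standing in for revdiff() output:
def smallest_delta(p1diff, p2diff=None):
    """Return ('p1'|'p2', delta), preferring the shorter delta."""
    if p2diff is not None and len(p1diff) > len(p2diff):
        return 'p2', p2diff
    return 'p1', p1diff

assert smallest_delta(b'a much longer delta', b'short')[0] == 'p2'
assert smallest_delta(b'only parent one', None)[0] == 'p1'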
3559
3592
3560
3593
3561 @contextlib.contextmanager
3594 @contextlib.contextmanager
3562 def _temprevlog(ui, orig, truncaterev):
3595 def _temprevlog(ui, orig, truncaterev):
3563 from mercurial import vfs as vfsmod
3596 from mercurial import vfs as vfsmod
3564
3597
3565 if orig._inline:
3598 if orig._inline:
3566 raise error.Abort('not supporting inline revlog (yet)')
3599 raise error.Abort('not supporting inline revlog (yet)')
3567 revlogkwargs = {}
3600 revlogkwargs = {}
3568 k = 'upperboundcomp'
3601 k = 'upperboundcomp'
3569 if util.safehasattr(orig, k):
3602 if util.safehasattr(orig, k):
3570 revlogkwargs[k] = getattr(orig, k)
3603 revlogkwargs[k] = getattr(orig, k)
3571
3604
3572 indexfile = getattr(orig, '_indexfile', None)
3605 indexfile = getattr(orig, '_indexfile', None)
3573 if indexfile is None:
3606 if indexfile is None:
3574 # compatibility with <= hg-5.8
3607 # compatibility with <= hg-5.8
3575 indexfile = getattr(orig, 'indexfile')
3608 indexfile = getattr(orig, 'indexfile')
3576 origindexpath = orig.opener.join(indexfile)
3609 origindexpath = orig.opener.join(indexfile)
3577
3610
3578 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3611 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3579 origdatapath = orig.opener.join(datafile)
3612 origdatapath = orig.opener.join(datafile)
3580 radix = b'revlog'
3613 radix = b'revlog'
3581 indexname = b'revlog.i'
3614 indexname = b'revlog.i'
3582 dataname = b'revlog.d'
3615 dataname = b'revlog.d'
3583
3616
3584 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3617 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3585 try:
3618 try:
3586 # copy the data file in a temporary directory
3619 # copy the data file in a temporary directory
3587 ui.debug('copying data in %s\n' % tmpdir)
3620 ui.debug('copying data in %s\n' % tmpdir)
3588 destindexpath = os.path.join(tmpdir, 'revlog.i')
3621 destindexpath = os.path.join(tmpdir, 'revlog.i')
3589 destdatapath = os.path.join(tmpdir, 'revlog.d')
3622 destdatapath = os.path.join(tmpdir, 'revlog.d')
3590 shutil.copyfile(origindexpath, destindexpath)
3623 shutil.copyfile(origindexpath, destindexpath)
3591 shutil.copyfile(origdatapath, destdatapath)
3624 shutil.copyfile(origdatapath, destdatapath)
3592
3625
3593 # remove the data we want to add again
3626 # remove the data we want to add again
3594 ui.debug('truncating data to be rewritten\n')
3627 ui.debug('truncating data to be rewritten\n')
3595 with open(destindexpath, 'ab') as index:
3628 with open(destindexpath, 'ab') as index:
3596 index.seek(0)
3629 index.seek(0)
3597 index.truncate(truncaterev * orig._io.size)
3630 index.truncate(truncaterev * orig._io.size)
3598 with open(destdatapath, 'ab') as data:
3631 with open(destdatapath, 'ab') as data:
3599 data.seek(0)
3632 data.seek(0)
3600 data.truncate(orig.start(truncaterev))
3633 data.truncate(orig.start(truncaterev))
3601
3634
3602 # instantiate a new revlog from the temporary copy
3635 # instantiate a new revlog from the temporary copy
3603 ui.debug('instantiating revlog from the temporary copy\n')
3636 ui.debug('instantiating revlog from the temporary copy\n')
3604 vfs = vfsmod.vfs(tmpdir)
3637 vfs = vfsmod.vfs(tmpdir)
3605 vfs.options = getattr(orig.opener, 'options', None)
3638 vfs.options = getattr(orig.opener, 'options', None)
3606
3639
3607 try:
3640 try:
3608 dest = revlog(vfs, radix=radix, **revlogkwargs)
3641 dest = revlog(vfs, radix=radix, **revlogkwargs)
3609 except TypeError:
3642 except TypeError:
3610 dest = revlog(
3643 dest = revlog(
3611 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3644 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3612 )
3645 )
3613 if dest._inline:
3646 if dest._inline:
3614 raise error.Abort('not supporting inline revlog (yet)')
3647 raise error.Abort('not supporting inline revlog (yet)')
3615 # make sure internals are initialized
3648 # make sure internals are initialized
3616 dest.revision(len(dest) - 1)
3649 dest.revision(len(dest) - 1)
3617 yield dest
3650 yield dest
3618 del dest, vfs
3651 del dest, vfs
3619 finally:
3652 finally:
3620 shutil.rmtree(tmpdir, True)
3653 shutil.rmtree(tmpdir, True)
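
# _temprevlog follows the classic "mkdtemp, yield, rmtree in a finally"
# shape, so the scratch copy is removed even when the benchmark body
# raises. The bare pattern as a runnable sketch:
import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def tempworkdir(prefix='tmp-hgperf-'):
    tmpdir = tempfile.mkdtemp(prefix=prefix)
    try:
        yield tmpdir
    finally:
        shutil.rmtree(tmpdir, True)

with tempworkdir() as d:
    open(os.path.join(d, 'scratch'), 'w').close()
# the directory is gone here, even if the with-body had raised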
3621
3654
3622
3655
3623 @command(
3656 @command(
3624 b'perf::revlogchunks|perfrevlogchunks',
3657 b'perf::revlogchunks|perfrevlogchunks',
3625 revlogopts
3658 revlogopts
3626 + formatteropts
3659 + formatteropts
3627 + [
3660 + [
3628 (b'e', b'engines', b'', b'compression engines to use'),
3661 (b'e', b'engines', b'', b'compression engines to use'),
3629 (b's', b'startrev', 0, b'revision to start at'),
3662 (b's', b'startrev', 0, b'revision to start at'),
3630 ],
3663 ],
3631 b'-c|-m|FILE',
3664 b'-c|-m|FILE',
3632 )
3665 )
3633 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3666 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3634 """Benchmark operations on revlog chunks.
3667 """Benchmark operations on revlog chunks.
3635
3668
3636 Logically, each revlog is a collection of fulltext revisions. However,
3669 Logically, each revlog is a collection of fulltext revisions. However,
3637 stored within each revlog are "chunks" of possibly compressed data. This
3670 stored within each revlog are "chunks" of possibly compressed data. This
3638 data needs to be read and decompressed or compressed and written.
3671 data needs to be read and decompressed or compressed and written.
3639
3672
3640 This command measures the time it takes to read+decompress and recompress
3673 This command measures the time it takes to read+decompress and recompress
3641 chunks in a revlog. It effectively isolates I/O and compression performance.
3674 chunks in a revlog. It effectively isolates I/O and compression performance.
3642 For measurements of higher-level operations like resolving revisions,
3675 For measurements of higher-level operations like resolving revisions,
3643 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3676 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3644 """
3677 """
3645 opts = _byteskwargs(opts)
3678 opts = _byteskwargs(opts)
3646
3679
3647 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3680 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3648
3681
3649 # _chunkraw was renamed to _getsegmentforrevs.
3682 # _chunkraw was renamed to _getsegmentforrevs.
3650 try:
3683 try:
3651 segmentforrevs = rl._getsegmentforrevs
3684 segmentforrevs = rl._getsegmentforrevs
3652 except AttributeError:
3685 except AttributeError:
3653 segmentforrevs = rl._chunkraw
3686 segmentforrevs = rl._chunkraw
3654
3687
3655 # Verify engines argument.
3688 # Verify engines argument.
3656 if engines:
3689 if engines:
3657 engines = {e.strip() for e in engines.split(b',')}
3690 engines = {e.strip() for e in engines.split(b',')}
3658 for engine in engines:
3691 for engine in engines:
3659 try:
3692 try:
3660 util.compressionengines[engine]
3693 util.compressionengines[engine]
3661 except KeyError:
3694 except KeyError:
3662 raise error.Abort(b'unknown compression engine: %s' % engine)
3695 raise error.Abort(b'unknown compression engine: %s' % engine)
3663 else:
3696 else:
3664 engines = []
3697 engines = []
3665 for e in util.compengines:
3698 for e in util.compengines:
3666 engine = util.compengines[e]
3699 engine = util.compengines[e]
3667 try:
3700 try:
3668 if engine.available():
3701 if engine.available():
3669 engine.revlogcompressor().compress(b'dummy')
3702 engine.revlogcompressor().compress(b'dummy')
3670 engines.append(e)
3703 engines.append(e)
3671 except NotImplementedError:
3704 except NotImplementedError:
3672 pass
3705 pass
3673
3706
3674 revs = list(rl.revs(startrev, len(rl) - 1))
3707 revs = list(rl.revs(startrev, len(rl) - 1))
3675
3708
3676 def rlfh(rl):
3709 def rlfh(rl):
3677 if rl._inline:
3710 if rl._inline:
3678 indexfile = getattr(rl, '_indexfile', None)
3711 indexfile = getattr(rl, '_indexfile', None)
3679 if indexfile is None:
3712 if indexfile is None:
3680 # compatibility with <= hg-5.8
3713 # compatibility with <= hg-5.8
3681 indexfile = getattr(rl, 'indexfile')
3714 indexfile = getattr(rl, 'indexfile')
3682 return getsvfs(repo)(indexfile)
3715 return getsvfs(repo)(indexfile)
3683 else:
3716 else:
3684 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3717 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3685 return getsvfs(repo)(datafile)
3718 return getsvfs(repo)(datafile)
3686
3719
3687 def doread():
3720 def doread():
3688 rl.clearcaches()
3721 rl.clearcaches()
3689 for rev in revs:
3722 for rev in revs:
3690 segmentforrevs(rev, rev)
3723 segmentforrevs(rev, rev)
3691
3724
3692 def doreadcachedfh():
3725 def doreadcachedfh():
3693 rl.clearcaches()
3726 rl.clearcaches()
3694 fh = rlfh(rl)
3727 fh = rlfh(rl)
3695 for rev in revs:
3728 for rev in revs:
3696 segmentforrevs(rev, rev, df=fh)
3729 segmentforrevs(rev, rev, df=fh)
3697
3730
3698 def doreadbatch():
3731 def doreadbatch():
3699 rl.clearcaches()
3732 rl.clearcaches()
3700 segmentforrevs(revs[0], revs[-1])
3733 segmentforrevs(revs[0], revs[-1])
3701
3734
3702 def doreadbatchcachedfh():
3735 def doreadbatchcachedfh():
3703 rl.clearcaches()
3736 rl.clearcaches()
3704 fh = rlfh(rl)
3737 fh = rlfh(rl)
3705 segmentforrevs(revs[0], revs[-1], df=fh)
3738 segmentforrevs(revs[0], revs[-1], df=fh)
3706
3739
3707 def dochunk():
3740 def dochunk():
3708 rl.clearcaches()
3741 rl.clearcaches()
3709 fh = rlfh(rl)
3742 fh = rlfh(rl)
3710 for rev in revs:
3743 for rev in revs:
3711 rl._chunk(rev, df=fh)
3744 rl._chunk(rev, df=fh)
3712
3745
3713 chunks = [None]
3746 chunks = [None]
3714
3747
3715 def dochunkbatch():
3748 def dochunkbatch():
3716 rl.clearcaches()
3749 rl.clearcaches()
3717 fh = rlfh(rl)
3750 fh = rlfh(rl)
3718 # Save chunks as a side-effect.
3751 # Save chunks as a side-effect.
3719 chunks[0] = rl._chunks(revs, df=fh)
3752 chunks[0] = rl._chunks(revs, df=fh)
3720
3753
3721 def docompress(compressor):
3754 def docompress(compressor):
3722 rl.clearcaches()
3755 rl.clearcaches()
3723
3756
3724 try:
3757 try:
3725 # Swap in the requested compression engine.
3758 # Swap in the requested compression engine.
3726 oldcompressor = rl._compressor
3759 oldcompressor = rl._compressor
3727 rl._compressor = compressor
3760 rl._compressor = compressor
3728 for chunk in chunks[0]:
3761 for chunk in chunks[0]:
3729 rl.compress(chunk)
3762 rl.compress(chunk)
3730 finally:
3763 finally:
3731 rl._compressor = oldcompressor
3764 rl._compressor = oldcompressor
3732
3765
3733 benches = [
3766 benches = [
3734 (lambda: doread(), b'read'),
3767 (lambda: doread(), b'read'),
3735 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3768 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3736 (lambda: doreadbatch(), b'read batch'),
3769 (lambda: doreadbatch(), b'read batch'),
3737 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3770 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3738 (lambda: dochunk(), b'chunk'),
3771 (lambda: dochunk(), b'chunk'),
3739 (lambda: dochunkbatch(), b'chunk batch'),
3772 (lambda: dochunkbatch(), b'chunk batch'),
3740 ]
3773 ]
3741
3774
3742 for engine in sorted(engines):
3775 for engine in sorted(engines):
3743 compressor = util.compengines[engine].revlogcompressor()
3776 compressor = util.compengines[engine].revlogcompressor()
3744 benches.append(
3777 benches.append(
3745 (
3778 (
3746 functools.partial(docompress, compressor),
3779 functools.partial(docompress, compressor),
3747 b'compress w/ %s' % engine,
3780 b'compress w/ %s' % engine,
3748 )
3781 )
3749 )
3782 )
3750
3783
3751 for fn, title in benches:
3784 for fn, title in benches:
3752 timer, fm = gettimer(ui, opts)
3785 timer, fm = gettimer(ui, opts)
3753 timer(fn, title=title)
3786 timer(fn, title=title)
3754 fm.end()
3787 fm.end()
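
# docompress() swaps the revlog's compressor inside try/finally so the
# original is always restored, even if compression raises. The same
# swap-and-restore move on a plain object (a sketch, not revlog API;
# Store and with_codec are invented names):
class Store:
    codec = 'zlib'

def with_codec(obj, codec, fn):
    old = obj.codec
    try:
        obj.codec = codec
        return fn(obj)
    finally:
        obj.codec = old

s = Store()
print(with_codec(s, 'zstd', lambda o: o.codec))  # 'zstd' during the call
print(s.codec)                                   # back to 'zlib' after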
3755
3788
3756
3789
3757 @command(
3790 @command(
3758 b'perf::revlogrevision|perfrevlogrevision',
3791 b'perf::revlogrevision|perfrevlogrevision',
3759 revlogopts
3792 revlogopts
3760 + formatteropts
3793 + formatteropts
3761 + [(b'', b'cache', False, b'use caches instead of clearing')],
3794 + [(b'', b'cache', False, b'use caches instead of clearing')],
3762 b'-c|-m|FILE REV',
3795 b'-c|-m|FILE REV',
3763 )
3796 )
3764 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3797 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3765 """Benchmark obtaining a revlog revision.
3798 """Benchmark obtaining a revlog revision.
3766
3799
3767 Obtaining a revlog revision consists of roughly the following steps:
3800 Obtaining a revlog revision consists of roughly the following steps:
3768
3801
3769 1. Compute the delta chain
3802 1. Compute the delta chain
3770 2. Slice the delta chain if applicable
3803 2. Slice the delta chain if applicable
3771 3. Obtain the raw chunks for that delta chain
3804 3. Obtain the raw chunks for that delta chain
3772 4. Decompress each raw chunk
3805 4. Decompress each raw chunk
3773 5. Apply binary patches to obtain fulltext
3806 5. Apply binary patches to obtain fulltext
3774 6. Verify hash of fulltext
3807 6. Verify hash of fulltext
3775
3808
3776 This command measures the time spent in each of these phases.
3809 This command measures the time spent in each of these phases.
3777 """
3810 """
3778 opts = _byteskwargs(opts)
3811 opts = _byteskwargs(opts)
3779
3812
3780 if opts.get(b'changelog') or opts.get(b'manifest'):
3813 if opts.get(b'changelog') or opts.get(b'manifest'):
3781 file_, rev = None, file_
3814 file_, rev = None, file_
3782 elif rev is None:
3815 elif rev is None:
3783 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3816 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3784
3817
3785 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3818 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3786
3819
3787 # _chunkraw was renamed to _getsegmentforrevs.
3820 # _chunkraw was renamed to _getsegmentforrevs.
3788 try:
3821 try:
3789 segmentforrevs = r._getsegmentforrevs
3822 segmentforrevs = r._getsegmentforrevs
3790 except AttributeError:
3823 except AttributeError:
3791 segmentforrevs = r._chunkraw
3824 segmentforrevs = r._chunkraw
3792
3825
3793 node = r.lookup(rev)
3826 node = r.lookup(rev)
3794 rev = r.rev(node)
3827 rev = r.rev(node)
3795
3828
3796 def getrawchunks(data, chain):
3829 def getrawchunks(data, chain):
3797 start = r.start
3830 start = r.start
3798 length = r.length
3831 length = r.length
3799 inline = r._inline
3832 inline = r._inline
3800 try:
3833 try:
3801 iosize = r.index.entry_size
3834 iosize = r.index.entry_size
3802 except AttributeError:
3835 except AttributeError:
3803 iosize = r._io.size
3836 iosize = r._io.size
3804 buffer = util.buffer
3837 buffer = util.buffer
3805
3838
3806 chunks = []
3839 chunks = []
3807 ladd = chunks.append
3840 ladd = chunks.append
3808 for idx, item in enumerate(chain):
3841 for idx, item in enumerate(chain):
3809 offset = start(item[0])
3842 offset = start(item[0])
3810 bits = data[idx]
3843 bits = data[idx]
3811 for rev in item:
3844 for rev in item:
3812 chunkstart = start(rev)
3845 chunkstart = start(rev)
3813 if inline:
3846 if inline:
3814 chunkstart += (rev + 1) * iosize
3847 chunkstart += (rev + 1) * iosize
3815 chunklength = length(rev)
3848 chunklength = length(rev)
3816 ladd(buffer(bits, chunkstart - offset, chunklength))
3849 ladd(buffer(bits, chunkstart - offset, chunklength))
3817
3850
3818 return chunks
3851 return chunks
3819
3852
3820 def dodeltachain(rev):
3853 def dodeltachain(rev):
3821 if not cache:
3854 if not cache:
3822 r.clearcaches()
3855 r.clearcaches()
3823 r._deltachain(rev)
3856 r._deltachain(rev)
3824
3857
3825 def doread(chain):
3858 def doread(chain):
3826 if not cache:
3859 if not cache:
3827 r.clearcaches()
3860 r.clearcaches()
3828 for item in slicedchain:
3861 for item in slicedchain:
3829 segmentforrevs(item[0], item[-1])
3862 segmentforrevs(item[0], item[-1])
3830
3863
3831 def doslice(r, chain, size):
3864 def doslice(r, chain, size):
3832 for s in slicechunk(r, chain, targetsize=size):
3865 for s in slicechunk(r, chain, targetsize=size):
3833 pass
3866 pass
3834
3867
3835 def dorawchunks(data, chain):
3868 def dorawchunks(data, chain):
3836 if not cache:
3869 if not cache:
3837 r.clearcaches()
3870 r.clearcaches()
3838 getrawchunks(data, chain)
3871 getrawchunks(data, chain)
3839
3872
3840 def dodecompress(chunks):
3873 def dodecompress(chunks):
3841 decomp = r.decompress
3874 decomp = r.decompress
3842 for chunk in chunks:
3875 for chunk in chunks:
3843 decomp(chunk)
3876 decomp(chunk)
3844
3877
3845 def dopatch(text, bins):
3878 def dopatch(text, bins):
3846 if not cache:
3879 if not cache:
3847 r.clearcaches()
3880 r.clearcaches()
3848 mdiff.patches(text, bins)
3881 mdiff.patches(text, bins)
3849
3882
3850 def dohash(text):
3883 def dohash(text):
3851 if not cache:
3884 if not cache:
3852 r.clearcaches()
3885 r.clearcaches()
3853 r.checkhash(text, node, rev=rev)
3886 r.checkhash(text, node, rev=rev)
3854
3887
3855 def dorevision():
3888 def dorevision():
3856 if not cache:
3889 if not cache:
3857 r.clearcaches()
3890 r.clearcaches()
3858 r.revision(node)
3891 r.revision(node)
3859
3892
3860 try:
3893 try:
3861 from mercurial.revlogutils.deltas import slicechunk
3894 from mercurial.revlogutils.deltas import slicechunk
3862 except ImportError:
3895 except ImportError:
3863 slicechunk = getattr(revlog, '_slicechunk', None)
3896 slicechunk = getattr(revlog, '_slicechunk', None)
3864
3897
3865 size = r.length(rev)
3898 size = r.length(rev)
3866 chain = r._deltachain(rev)[0]
3899 chain = r._deltachain(rev)[0]
3867 if not getattr(r, '_withsparseread', False):
3900 if not getattr(r, '_withsparseread', False):
3868 slicedchain = (chain,)
3901 slicedchain = (chain,)
3869 else:
3902 else:
3870 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3903 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3871 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3904 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3872 rawchunks = getrawchunks(data, slicedchain)
3905 rawchunks = getrawchunks(data, slicedchain)
3873 bins = r._chunks(chain)
3906 bins = r._chunks(chain)
3874 text = bytes(bins[0])
3907 text = bytes(bins[0])
3875 bins = bins[1:]
3908 bins = bins[1:]
3876 text = mdiff.patches(text, bins)
3909 text = mdiff.patches(text, bins)
3877
3910
3878 benches = [
3911 benches = [
3879 (lambda: dorevision(), b'full'),
3912 (lambda: dorevision(), b'full'),
3880 (lambda: dodeltachain(rev), b'deltachain'),
3913 (lambda: dodeltachain(rev), b'deltachain'),
3881 (lambda: doread(chain), b'read'),
3914 (lambda: doread(chain), b'read'),
3882 ]
3915 ]
3883
3916
3884 if getattr(r, '_withsparseread', False):
3917 if getattr(r, '_withsparseread', False):
3885 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3918 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3886 benches.append(slicing)
3919 benches.append(slicing)
3887
3920
3888 benches.extend(
3921 benches.extend(
3889 [
3922 [
3890 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3923 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3891 (lambda: dodecompress(rawchunks), b'decompress'),
3924 (lambda: dodecompress(rawchunks), b'decompress'),
3892 (lambda: dopatch(text, bins), b'patch'),
3925 (lambda: dopatch(text, bins), b'patch'),
3893 (lambda: dohash(text), b'hash'),
3926 (lambda: dohash(text), b'hash'),
3894 ]
3927 ]
3895 )
3928 )
3896
3929
3897 timer, fm = gettimer(ui, opts)
3930 timer, fm = gettimer(ui, opts)
3898 for fn, title in benches:
3931 for fn, title in benches:
3899 timer(fn, title=title)
3932 timer(fn, title=title)
3900 fm.end()
3933 fm.end()
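
# Step 6 above verifies that the reconstructed fulltext still hashes to
# the stored node. A sketch of the scheme revlogs use for that node:
# SHA-1 over the two parent nodes in sorted order followed by the text
# (simplified; flag processing and similar details are ignored here):
import hashlib

def revision_node(text, p1, p2):
    a, b = sorted((p1, p2))
    s = hashlib.sha1(a)
    s.update(b)
    s.update(text)
    return s.digest()

nullid = b'\0' * 20
node = revision_node(b'file content v1', nullid, nullid)
assert revision_node(b'file content v1', nullid, nullid) == node
assert revision_node(b'tampered text', nullid, nullid) != node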
3901
3934
3902
3935
3903 @command(
3936 @command(
3904 b'perf::revset|perfrevset',
3937 b'perf::revset|perfrevset',
3905 [
3938 [
3906 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3939 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3907 (b'', b'contexts', False, b'obtain changectx for each revision'),
3940 (b'', b'contexts', False, b'obtain changectx for each revision'),
3908 ]
3941 ]
3909 + formatteropts,
3942 + formatteropts,
3910 b"REVSET",
3943 b"REVSET",
3911 )
3944 )
3912 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3945 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3913 """benchmark the execution time of a revset
3946 """benchmark the execution time of a revset
3914
3947
3915 Use the --clear option if you need to evaluate the impact of building the
3948 Use the --clear option if you need to evaluate the impact of building the
3916 volatile revision set caches on revset execution. The volatile caches hold
3949 volatile revision set caches on revset execution. The volatile caches hold
3917 filtered and obsolescence-related data."""
3950 filtered and obsolescence-related data."""
3918 opts = _byteskwargs(opts)
3951 opts = _byteskwargs(opts)
3919
3952
3920 timer, fm = gettimer(ui, opts)
3953 timer, fm = gettimer(ui, opts)
3921
3954
3922 def d():
3955 def d():
3923 if clear:
3956 if clear:
3924 repo.invalidatevolatilesets()
3957 repo.invalidatevolatilesets()
3925 if contexts:
3958 if contexts:
3926 for ctx in repo.set(expr):
3959 for ctx in repo.set(expr):
3927 pass
3960 pass
3928 else:
3961 else:
3929 for r in repo.revs(expr):
3962 for r in repo.revs(expr):
3930 pass
3963 pass
3931
3964
3932 timer(d)
3965 timer(d)
3933 fm.end()
3966 fm.end()
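
# Both branches of d() above iterate to exhaustion and discard the
# items: revsets are lazy, so a benchmark that never drains the iterator
# would mostly measure setup cost. A common zero-retention way to drain
# a lazy iterable (a generic sketch, not how this command does it):
import collections
import time

def drain(iterable):
    # deque with maxlen=0 consumes an iterator without storing items
    collections.deque(iterable, maxlen=0)

lazy = (x * x for x in range(1000000))
start = time.perf_counter()
drain(lazy)
print('%.4f s' % (time.perf_counter() - start))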
3934
3967
3935
3968
3936 @command(
3969 @command(
3937 b'perf::volatilesets|perfvolatilesets',
3970 b'perf::volatilesets|perfvolatilesets',
3938 [
3971 [
3939 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3972 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3940 ]
3973 ]
3941 + formatteropts,
3974 + formatteropts,
3942 )
3975 )
3943 def perfvolatilesets(ui, repo, *names, **opts):
3976 def perfvolatilesets(ui, repo, *names, **opts):
3944 """benchmark the computation of various volatile sets
3977 """benchmark the computation of various volatile sets
3945
3978
3946 Volatile sets compute elements related to filtering and obsolescence."""
3979 Volatile sets compute elements related to filtering and obsolescence."""
3947 opts = _byteskwargs(opts)
3980 opts = _byteskwargs(opts)
3948 timer, fm = gettimer(ui, opts)
3981 timer, fm = gettimer(ui, opts)
3949 repo = repo.unfiltered()
3982 repo = repo.unfiltered()
3950
3983
3951 def getobs(name):
3984 def getobs(name):
3952 def d():
3985 def d():
3953 repo.invalidatevolatilesets()
3986 repo.invalidatevolatilesets()
3954 if opts[b'clear_obsstore']:
3987 if opts[b'clear_obsstore']:
3955 clearfilecache(repo, b'obsstore')
3988 clearfilecache(repo, b'obsstore')
3956 obsolete.getrevs(repo, name)
3989 obsolete.getrevs(repo, name)
3957
3990
3958 return d
3991 return d
3959
3992
3960 allobs = sorted(obsolete.cachefuncs)
3993 allobs = sorted(obsolete.cachefuncs)
3961 if names:
3994 if names:
3962 allobs = [n for n in allobs if n in names]
3995 allobs = [n for n in allobs if n in names]
3963
3996
3964 for name in allobs:
3997 for name in allobs:
3965 timer(getobs(name), title=name)
3998 timer(getobs(name), title=name)
3966
3999
3967 def getfiltered(name):
4000 def getfiltered(name):
3968 def d():
4001 def d():
3969 repo.invalidatevolatilesets()
4002 repo.invalidatevolatilesets()
3970 if opts[b'clear_obsstore']:
4003 if opts[b'clear_obsstore']:
3971 clearfilecache(repo, b'obsstore')
4004 clearfilecache(repo, b'obsstore')
3972 repoview.filterrevs(repo, name)
4005 repoview.filterrevs(repo, name)
3973
4006
3974 return d
4007 return d
3975
4008
3976 allfilter = sorted(repoview.filtertable)
4009 allfilter = sorted(repoview.filtertable)
3977 if names:
4010 if names:
3978 allfilter = [n for n in allfilter if n in names]
4011 allfilter = [n for n in allfilter if n in names]
3979
4012
3980 for name in allfilter:
4013 for name in allfilter:
3981 timer(getfiltered(name), title=name)
4014 timer(getfiltered(name), title=name)
3982 fm.end()
4015 fm.end()
3983
4016
3984
4017
3985 @command(
4018 @command(
3986 b'perf::branchmap|perfbranchmap',
4019 b'perf::branchmap|perfbranchmap',
3987 [
4020 [
3988 (b'f', b'full', False, b'Includes build time of subset'),
4021 (b'f', b'full', False, b'Includes build time of subset'),
3989 (
4022 (
3990 b'',
4023 b'',
3991 b'clear-revbranch',
4024 b'clear-revbranch',
3992 False,
4025 False,
3993 b'purge the revbranch cache between computation',
4026 b'purge the revbranch cache between computation',
3994 ),
4027 ),
3995 ]
4028 ]
3996 + formatteropts,
4029 + formatteropts,
3997 )
4030 )
3998 def perfbranchmap(ui, repo, *filternames, **opts):
4031 def perfbranchmap(ui, repo, *filternames, **opts):
3999 """benchmark the update of a branchmap
4032 """benchmark the update of a branchmap
4000
4033
4001 This benchmarks the full repo.branchmap() call with read and write disabled
4034 This benchmarks the full repo.branchmap() call with read and write disabled
4002 """
4035 """
4003 opts = _byteskwargs(opts)
4036 opts = _byteskwargs(opts)
4004 full = opts.get(b"full", False)
4037 full = opts.get(b"full", False)
4005 clear_revbranch = opts.get(b"clear_revbranch", False)
4038 clear_revbranch = opts.get(b"clear_revbranch", False)
4006 timer, fm = gettimer(ui, opts)
4039 timer, fm = gettimer(ui, opts)
4007
4040
4008 def getbranchmap(filtername):
4041 def getbranchmap(filtername):
4009 """generate a benchmark function for the filtername"""
4042 """generate a benchmark function for the filtername"""
4010 if filtername is None:
4043 if filtername is None:
4011 view = repo
4044 view = repo
4012 else:
4045 else:
4013 view = repo.filtered(filtername)
4046 view = repo.filtered(filtername)
4014 if util.safehasattr(view._branchcaches, '_per_filter'):
4047 if util.safehasattr(view._branchcaches, '_per_filter'):
4015 filtered = view._branchcaches._per_filter
4048 filtered = view._branchcaches._per_filter
4016 else:
4049 else:
4017 # older versions
4050 # older versions
4018 filtered = view._branchcaches
4051 filtered = view._branchcaches
4019
4052
4020 def d():
4053 def d():
4021 if clear_revbranch:
4054 if clear_revbranch:
4022 repo.revbranchcache()._clear()
4055 repo.revbranchcache()._clear()
4023 if full:
4056 if full:
4024 view._branchcaches.clear()
4057 view._branchcaches.clear()
4025 else:
4058 else:
4026 filtered.pop(filtername, None)
4059 filtered.pop(filtername, None)
4027 view.branchmap()
4060 view.branchmap()
4028
4061
4029 return d
4062 return d
4030
4063
4031 # add filter in smaller subset to bigger subset
4064 # add filter in smaller subset to bigger subset
4032 possiblefilters = set(repoview.filtertable)
4065 possiblefilters = set(repoview.filtertable)
4033 if filternames:
4066 if filternames:
4034 possiblefilters &= set(filternames)
4067 possiblefilters &= set(filternames)
4035 subsettable = getbranchmapsubsettable()
4068 subsettable = getbranchmapsubsettable()
4036 allfilters = []
4069 allfilters = []
4037 while possiblefilters:
4070 while possiblefilters:
4038 for name in possiblefilters:
4071 for name in possiblefilters:
4039 subset = subsettable.get(name)
4072 subset = subsettable.get(name)
4040 if subset not in possiblefilters:
4073 if subset not in possiblefilters:
4041 break
4074 break
4042 else:
4075 else:
4043 assert False, b'subset cycle %s!' % possiblefilters
4076 assert False, b'subset cycle %s!' % possiblefilters
4044 allfilters.append(name)
4077 allfilters.append(name)
4045 possiblefilters.remove(name)
4078 possiblefilters.remove(name)
4046
4079
4047 # warm the cache
4080 # warm the cache
4048 if not full:
4081 if not full:
4049 for name in allfilters:
4082 for name in allfilters:
4050 repo.filtered(name).branchmap()
4083 repo.filtered(name).branchmap()
4051 if not filternames or b'unfiltered' in filternames:
4084 if not filternames or b'unfiltered' in filternames:
4052 # add unfiltered
4085 # add unfiltered
4053 allfilters.append(None)
4086 allfilters.append(None)
4054
4087
4055 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4088 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4056 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4089 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4057 branchcacheread.set(classmethod(lambda *args: None))
4090 branchcacheread.set(classmethod(lambda *args: None))
4058 else:
4091 else:
4059 # older versions
4092 # older versions
4060 branchcacheread = safeattrsetter(branchmap, b'read')
4093 branchcacheread = safeattrsetter(branchmap, b'read')
4061 branchcacheread.set(lambda *args: None)
4094 branchcacheread.set(lambda *args: None)
4062 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4095 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4063 branchcachewrite.set(lambda *args: None)
4096 branchcachewrite.set(lambda *args: None)
4064 try:
4097 try:
4065 for name in allfilters:
4098 for name in allfilters:
4066 printname = name
4099 printname = name
4067 if name is None:
4100 if name is None:
4068 printname = b'unfiltered'
4101 printname = b'unfiltered'
4069 timer(getbranchmap(name), title=printname)
4102 timer(getbranchmap(name), title=printname)
4070 finally:
4103 finally:
4071 branchcacheread.restore()
4104 branchcacheread.restore()
4072 branchcachewrite.restore()
4105 branchcachewrite.restore()
4073 fm.end()
4106 fm.end()
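
# perfbranchmap stubs out branchcache reads and writes with no-ops and
# restores them in a finally block, so on-disk caches neither help nor
# get clobbered during timing. The patch-and-restore shape in miniature
# (Cache is a made-up stand-in for branchmap.branchcache):
class Cache:
    @classmethod
    def fromfile(cls, path):
        return 'loaded from %s' % path

saved = Cache.fromfile
try:
    Cache.fromfile = classmethod(lambda *args: None)  # disable reads
    assert Cache.fromfile('x') is None
finally:
    Cache.fromfile = saved

assert Cache.fromfile('x') == 'loaded from x'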
4074
4107
4075
4108
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear caches between each run'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

       # update for the last revision only
       $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

       # update for changes coming with a new branch
       $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closures

    # we use a `list` here to avoid possible side effects from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)


@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up-to-date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without the timer, as the filter may not be cached yet
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()


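# Example invocations for the command above (the filter name is just an
# illustration; the set of available filters depends on the repository):
#
#     $ hg perf::branchmapload --list
#     $ hg perf::branchmapload --filter visible --clear-revlogs
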
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(repo, svfs)))
    fm.end()


@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


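# A minimal, illustrative helper (not part of upstream perf.py) showing the
# util.lrucachedict API exercised by the benchmarks above; the keys and
# values here are assumptions for the sketch, not an upstream interface:
def _lrucache_example():
    d = util.lrucachedict(4, maxcost=100)
    d.insert(b'key', b'value', cost=10)  # insertion may evict other entries
    d[b'plain'] = b'value'  # plain assignment, no cost tracked
    try:
        return d[b'key']  # lookups raise KeyError once an entry is evicted
    except KeyError:
        return None
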
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        line = item * nitems + b'\n'

    def benchmark():
        for i in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for i in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()


def uisetup(ui):
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)


@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
@@ -1,922 +1,933 @@
# tags.py - read tag info from local repository
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com>
# Copyright 2009 Greg Ward <greg@gerg.ca>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Currently this module only deals with reading and caching tags.
# Eventually, it could take care of updating (adding/removing/moving)
# tags too.


import binascii
import io

from .node import (
    bin,
    hex,
    nullrev,
    short,
)
from .i18n import _
from . import (
    encoding,
    error,
    match as matchmod,
    scmutil,
    util,
)
from .utils import stringutil

# Tags computation can be expensive and caches exist to make it fast in
# the common case.
#
# The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
# each revision in the repository. The file is effectively an array of
# fixed length records. Read the docs for "hgtagsfnodescache" for technical
# details.
#
# The .hgtags filenode cache grows in proportion to the length of the
# changelog. The file is truncated when the changelog is stripped.
#
# The purpose of the filenode cache is to avoid the most expensive part
# of finding global tags, which is looking up the .hgtags filenode in the
# manifest for each head. This can take dozens of milliseconds or over
# 100ms for repositories with very large manifests. Multiply that by
# dozens or even hundreds of heads and there is a significant performance
# concern.
#
# There also exists a separate cache file for each repository filter.
# These "tags2*" files store information about the history of tags.
#
# Each tags cache file consists of a cache validation line followed by
# a history of tags.
#
# The cache validation line has the format:
#
#   <tiprev> <tipnode> [<filteredhash>]
#
# <tiprev> is an integer revision and <tipnode> is a 40 character hex
# node for that changeset. These redundantly identify the repository
# tip from the time the cache was written. In addition, <filteredhash>,
# if present, is a 40 character hex hash of the contents of the filtered
# revisions for this filter. If the set of filtered revs changes, the
# hash will change and invalidate the cache.
#
# The history part of the tags cache consists of lines of the form:
#
#   <node> <tag>
#
# (This format is identical to that of .hgtags files.)
#
# <tag> is the tag name and <node> is the 40 character hex changeset
# the tag is associated with.
#
# Tags are written sorted by tag name.
#
# Tags associated with multiple changesets have an entry for each changeset.
# The most recent changeset (in terms of revlog ordering for the head
# setting it) for each tag is last.

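# An illustrative (hypothetical) tags cache file following the layout
# described above -- one validation line, then one "<node> <tag>" line per
# entry (node values abbreviated here for readability):
#
#     4542 e6a698d774c2... 8d3c25b4...
#     875517b4806a848f942811a315a5bce30804ae85 t5
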
def fnoderevs(ui, repo, revs):
    """return the list of '.hgtags' fnodes used in a set of revisions

    This is returned as a list of unique fnodes. We use a list instead of a
    set because order matters when it comes to tags."""
    unfi = repo.unfiltered()
    tonode = unfi.changelog.node
    nodes = [tonode(r) for r in revs]
    fnodes = _getfnodes(ui, repo, nodes)
    fnodes = _filterfnodes(fnodes, nodes)
    return fnodes

def _nulltonone(repo, value):
    """convert nullid to None

    For a tag value, nullid means "deleted". This small utility function
    helps translate that to None."""
    if value == repo.nullid:
        return None
    return value

def difftags(ui, repo, oldfnodes, newfnodes):
    """list differences between tags expressed in two sets of file-nodes

    The list contains entries in the form: (tagname, oldvalue, newvalue).
    None is used to express a missing value:
        ('foo', None, 'abcd') is a new tag,
        ('bar', 'ef01', None) is a deletion,
        ('baz', 'abcd', 'ef01') is a tag movement.
    """
    if oldfnodes == newfnodes:
        return []
    oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
    newtags = _tagsfromfnodes(ui, repo, newfnodes)

    # list of (tag, old, new): None means missing
    entries = []
    for tag, (new, __) in newtags.items():
        new = _nulltonone(repo, new)
        old, __ = oldtags.pop(tag, (None, None))
        old = _nulltonone(repo, old)
        if old != new:
            entries.append((tag, old, new))
    # handle deleted tags
    for tag, (old, __) in oldtags.items():
        old = _nulltonone(repo, old)
        if old is not None:
            entries.append((tag, old, None))
    entries.sort()
    return entries

def writediff(fp, difflist):
    """write tags diff information to a file.

    Data are stored with a line-based format:

        <action> <hex-node> <tag-name>\n

    Actions are defined as follows:
        -R tag is removed,
        +A tag is added,
        -M tag is moved (old value),
        +M tag is moved (new value),

    Example:

         +A 875517b4806a848f942811a315a5bce30804ae85 t5

    See documentation of difftags output for details about the input.
    """
    add = b'+A %s %s\n'
    remove = b'-R %s %s\n'
    updateold = b'-M %s %s\n'
    updatenew = b'+M %s %s\n'
    for tag, old, new in difflist:
        # translate to hex
        if old is not None:
            old = hex(old)
        if new is not None:
            new = hex(new)
        # write to file
        if old is None:
            fp.write(add % (new, tag))
        elif new is None:
            fp.write(remove % (old, tag))
        else:
            fp.write(updateold % (old, tag))
            fp.write(updatenew % (new, tag))

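# A small illustrative helper (not part of the original module) tying
# difftags and writediff together; `oldfnodes` and `newfnodes` are assumed
# to be fnode lists such as those returned by fnoderevs above:
def _writediff_example(ui, repo, oldfnodes, newfnodes):
    entries = difftags(ui, repo, oldfnodes, newfnodes)
    buf = io.BytesIO()
    writediff(buf, entries)
    return buf.getvalue()
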
def findglobaltags(ui, repo):
    """Find global tags in a repo: return a tagsmap

    tagsmap: tag name to (node, hist) 2-tuples.

    The tags cache is read and updated as a side-effect of calling.
    """
    (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
    if cachetags is not None:
        assert not shouldwrite
        # XXX is this really 100% correct? are there oddball special
        # cases where a global tag should outrank a local tag but won't,
        # because cachetags does not contain rank info?
        alltags = {}
        _updatetags(cachetags, alltags)
        return alltags

    for head in reversed(heads):  # oldest to newest
        assert repo.changelog.index.has_node(
            head
        ), b"tag cache returned bogus head %s" % short(head)
    fnodes = _filterfnodes(tagfnode, reversed(heads))
    alltags = _tagsfromfnodes(ui, repo, fnodes)

    # and update the cache (if necessary)
    if shouldwrite:
        _writetagcache(ui, repo, valid, alltags)
    return alltags

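# Shape of the tagsmap returned above (values hypothetical): each tag name
# maps to its current binary node plus the older nodes it was previously
# attached to, in file order:
#
#     {b'tag-name': (current_node, [older_node, ...])}
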
def _filterfnodes(tagfnode, nodes):
    """return a list of unique fnodes

    The order of this list matches the order of "nodes". Preserving this
    order is important as reading tags in a different order provides
    different results."""
    seen = set()  # set of fnode
    fnodes = []
    for no in nodes:  # oldest to newest
        fnode = tagfnode.get(no)
        if fnode and fnode not in seen:
            seen.add(fnode)
            fnodes.append(fnode)
    return fnodes

def _tagsfromfnodes(ui, repo, fnodes):
    """return a tagsmap from a list of file-nodes

    tagsmap: tag name to (node, hist) 2-tuples.

    The order of the list matters."""
    alltags = {}
    fctx = None
    for fnode in fnodes:
        if fctx is None:
            fctx = repo.filectx(b'.hgtags', fileid=fnode)
        else:
            fctx = fctx.filectx(fnode)
        filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
        _updatetags(filetags, alltags)
    return alltags

def readlocaltags(ui, repo, alltags, tagtypes):
    '''Read local tags in repo. Update alltags and tagtypes.'''
    try:
        data = repo.vfs.read(b"localtags")
    except FileNotFoundError:
        return

    # localtags is in the local encoding; re-encode to UTF-8 on
    # input for consistency with the rest of this module.
    filetags = _readtags(
        ui, repo, data.splitlines(), b"localtags", recode=encoding.fromlocal
    )

    # remove tags pointing to invalid nodes
    cl = repo.changelog
    for t in list(filetags):
        try:
            cl.rev(filetags[t][0])
        except (LookupError, ValueError):
            del filetags[t]

    _updatetags(filetags, alltags, b'local', tagtypes)

def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
    """Read tag definitions from a file (or any source of lines).

    This function returns two sortdicts with similar information:

    - the first dict, bintaghist, contains the tag information as expected by
      the _readtags function, i.e. a mapping from tag name to (node, hist):
        - node is the node id from the last line read for that name,
        - hist is the list of node ids previously associated with it (in file
          order). All node ids are binary, not hex.

    - the second dict, hextaglines, is a mapping from tag name to a list of
      [hexnode, line number] pairs, ordered from the oldest to the newest node.

    When calcnodelines is False the hextaglines dict is not calculated (an
    empty dict is returned). This is done to improve this function's
    performance in cases where the line numbers are not needed.
    """

    bintaghist = util.sortdict()
    hextaglines = util.sortdict()
    count = 0

    def dbg(msg):
        ui.debug(b"%s, line %d: %s\n" % (fn, count, msg))

    for nline, line in enumerate(lines):
        count += 1
        if not line:
            continue
        try:
            (nodehex, name) = line.split(b" ", 1)
        except ValueError:
            dbg(b"cannot parse entry")
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except binascii.Error:
            dbg(b"node '%s' is not well formed" % nodehex)
            continue

        # update filetags
        if calcnodelines:
            # map tag name to a list of line numbers
            if name not in hextaglines:
                hextaglines[name] = []
            hextaglines[name].append([nodehex, nline])
            continue
        # map tag name to (node, hist)
        if name not in bintaghist:
            bintaghist[name] = []
        bintaghist[name].append(nodebin)
    return bintaghist, hextaglines

def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
    """Read tag definitions from a file (or any source of lines).

    Returns a mapping from tag name to (node, hist).

    "node" is the node id from the last line read for that name. "hist"
    is the list of node ids previously associated with it (in file order).
    All node ids are binary, not hex.
    """
    filetags, nodelines = _readtaghist(
        ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines
    )
    # util.sortdict().__setitem__ is much slower at replacing than inserting
    # new entries. The difference can matter if there are thousands of tags.
    # Create a new sortdict to avoid the performance penalty.
    newtags = util.sortdict()
    for tag, taghist in filetags.items():
        newtags[tag] = (taghist[-1], taghist[:-1])
    return newtags

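# Illustration (hypothetical .hgtags-style input) of the mapping built
# above: given the two lines
#
#     1234... mytag
#     abcd... mytag
#
# _readtags returns {b'mytag': (bin(b'abcd...'), [bin(b'1234...')])} --
# the last node read wins and earlier ones become the tag's history.
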
def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
    """Incorporate the tag info read from one file into dictionaries

    The first one, 'alltags', is a "tagsmap" (see 'findglobaltags' for
    details).

    The second one, 'tagtypes', is optional and will be updated to track the
    "tagtype" of entries in the tagsmap. When set, the 'tagtype' argument
    also needs to be set."""
    if tagtype is None:
        assert tagtypes is None

    for name, nodehist in filetags.items():
        if name not in alltags:
            alltags[name] = nodehist
            if tagtype is not None:
                tagtypes[name] = tagtype
            continue

        # we prefer alltags[name] if:
        #  it supersedes us OR
        #  mutual supersedes and it has a higher rank
        # otherwise we win because we're tip-most
        anode, ahist = nodehist
        bnode, bhist = alltags[name]
        if (
            bnode != anode
            and anode in bhist
            and (bnode not in ahist or len(bhist) > len(ahist))
        ):
            anode = bnode
        elif tagtype is not None:
            tagtypes[name] = tagtype
        ahist.extend([n for n in bhist if n not in ahist])
        alltags[name] = anode, ahist

def _filename(repo):
    """name of a tagcache file for a given repo or repoview"""
    filename = b'tags2'
    if repo.filtername:
        filename = b'%s-%s' % (filename, repo.filtername)
    return filename

def _readtagcache(ui, repo):
    """Read the tag cache.

    Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).

    If the cache is completely up-to-date, "cachetags" is a dict of the
    form returned by _readtags() and "heads", "fnodes", and "validinfo" are
    None and "shouldwrite" is False.

    If the cache is not up to date, "cachetags" is None. "heads" is a list
    of all heads currently in the repository, ordered from tip to oldest.
    "validinfo" is a tuple describing cache validation info. This is used
    when writing the tags cache. "fnodes" is a mapping from head to .hgtags
    filenode. "shouldwrite" is True.

    If the cache is not up to date, the caller is responsible for reading tag
    info from each returned head. (See findglobaltags().)
    """
    try:
        cachefile = repo.cachevfs(_filename(repo), b'r')
        # force reading the file for static-http
        cachelines = iter(cachefile)
    except IOError:
        cachefile = None

    cacherev = None
    cachenode = None
    cachehash = None
    if cachefile:
        try:
            validline = next(cachelines)
            validline = validline.split()
            cacherev = int(validline[0])
            cachenode = bin(validline[1])
            if len(validline) > 2:
                cachehash = bin(validline[2])
        except Exception:
            # corruption of the cache, just recompute it.
            pass

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if (
        cacherev == tiprev
        and cachenode == tipnode
        and cachehash == scmutil.filteredhash(repo, tiprev)
    ):
        tags = _readtags(ui, repo, cachelines, cachefile.name)
        cachefile.close()
        return (None, None, None, tags, False)
    if cachefile:
        cachefile.close()  # ignore rest of file

    valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [repo.nullid]:
        return ([], {}, valid, {}, False)

    # Case 3 (uncommon): cache file missing or empty.

    # Case 4 (uncommon): tip rev decreased. This should only happen
    # when we're called from localrepository.destroyed(). Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.

    # Case 5 (common): tip has changed, so we've added/replaced heads.

    # As it happens, the code to handle cases 3, 4, 5 is the same.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    if not len(repo.file(b'.hgtags')):
        # No tags have ever been committed, so we can avoid a
        # potentially expensive search.
        return ([], {}, valid, None, True)

    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads. Worst case: no cache
    # file, so newheads == repoheads.
    # Reversed order helps the cache ('repoheads' is in descending order)
    cachefnode = _getfnodes(ui, repo, reversed(repoheads))

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, valid, None, True)

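# Sketch of the two return shapes documented above (all values
# hypothetical): a fresh cache short-circuits with the parsed tags, while
# a stale one hands the heads and fnodes back to the caller:
#
#     (None, None, None, {b'1.0': (node, hist)}, False)     # cache fresh
#     ([head1, head2], {head1: fnode1}, valid, None, True)  # cache stale
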
483 def _getfnodes(ui, repo, nodes):
483 def _getfnodes(ui, repo, nodes):
484 """return .hgtags fnodes for a list of changeset nodes
484 """return .hgtags fnodes for a list of changeset nodes
485
485
486 Return value is a {node: fnode} mapping. There will be no entry for nodes
486 Return value is a {node: fnode} mapping. There will be no entry for nodes
487 without a '.hgtags' file.
487 without a '.hgtags' file.
488 """
488 """
489 starttime = util.timer()
489 starttime = util.timer()
490 fnodescache = hgtagsfnodescache(repo.unfiltered())
490 fnodescache = hgtagsfnodescache(repo.unfiltered())
491 cachefnode = {}
491 cachefnode = {}
492 validated_fnodes = set()
492 validated_fnodes = set()
493 unknown_entries = set()
493 unknown_entries = set()
494
494
495 flog = None
495 flog = None
496 for node in nodes:
496 for node in nodes:
497 fnode = fnodescache.getfnode(node)
497 fnode = fnodescache.getfnode(node)
498 if fnode != repo.nullid:
498 if fnode != repo.nullid:
499 if fnode not in validated_fnodes:
499 if fnode not in validated_fnodes:
500 if flog is None:
500 if flog is None:
501 flog = repo.file(b'.hgtags')
501 flog = repo.file(b'.hgtags')
502 if flog.hasnode(fnode):
502 if flog.hasnode(fnode):
503 validated_fnodes.add(fnode)
503 validated_fnodes.add(fnode)
504 else:
504 else:
505 unknown_entries.add(node)
505 unknown_entries.add(node)
506 cachefnode[node] = fnode
506 cachefnode[node] = fnode
507
507
508 if unknown_entries:
508 if unknown_entries:
509 fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
509 fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
510 for node, fnode in fixed_nodemap.items():
510 for node, fnode in fixed_nodemap.items():
511 if fnode != repo.nullid:
511 if fnode != repo.nullid:
512 cachefnode[node] = fnode
512 cachefnode[node] = fnode
513
513
514 fnodescache.write()
514 fnodescache.write()
515
515
516 duration = util.timer() - starttime
516 duration = util.timer() - starttime
517 ui.log(
517 ui.log(
518 b'tagscache',
518 b'tagscache',
519 b'%d/%d cache hits/lookups in %0.4f seconds\n',
519 b'%d/%d cache hits/lookups in %0.4f seconds\n',
520 fnodescache.hitcount,
520 fnodescache.hitcount,
521 fnodescache.lookupcount,
521 fnodescache.lookupcount,
522 duration,
522 duration,
523 )
523 )
524 return cachefnode
524 return cachefnode
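

# Illustrative sketch, not part of tags.py: driving _getfnodes() for the
# repository heads, the way the tag-cache code above does. `ui` and `repo`
# are assumed to be a live Mercurial ui/localrepository pair.
def _example_getfnodes(ui, repo):
    heads = repo.heads()
    # Reversed order helps the fnodes cache, as noted above.
    fnodes = _getfnodes(ui, repo, reversed(heads))
    # Heads without a '.hgtags' file simply have no entry in the mapping.
    return [node for node in heads if node in fnodes]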


def _writetagcache(ui, repo, valid, cachetags):
    filename = _filename(repo)
    try:
        cachefile = repo.cachevfs(filename, b'w', atomictemp=True)
    except (OSError, IOError):
        return

    ui.log(
        b'tagscache',
        b'writing .hg/cache/%s with %d tags\n',
        filename,
        len(cachetags),
    )

    if valid[2]:
        cachefile.write(
            b'%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2]))
        )
    else:
        cachefile.write(b'%d %s\n' % (valid[0], hex(valid[1])))

    # Tag names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module. If we converted
    # them to the local encoding on input, we would lose info writing
    # them to the cache.
    for (name, (node, hist)) in sorted(cachetags.items()):
        for n in hist:
            cachefile.write(b"%s %s\n" % (hex(n), name))
        cachefile.write(b"%s %s\n" % (hex(node), name))

    try:
        cachefile.close()
    except (OSError, IOError):
        pass
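

# Illustrative sketch, not part of tags.py: the on-disk layout that
# _writetagcache() produces, rebuilt here with plain byte formatting. The
# revision number and hex node values are invented for the example.
def _example_tags_cache_layout():
    # First line: "<tiprev> <tipnode-hex>", with an optional third hex
    # field when valid[2] is set.
    header = b'%d %s\n' % (42, b'ab' * 20)
    # Then one line per historical node and one for the current node of
    # each tag: "<node-hex> <tagname>".
    body = b'%s %s\n' % (b'11' * 20, b'v1.0')
    body += b'%s %s\n' % (b'22' * 20, b'v1.0')
    return header + body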


def tag(repo, names, node, message, local, user, date, editor=False):
    """tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing"""

    if not local:
        m = matchmod.exact([b'.hgtags'])
        st = repo.status(match=m, unknown=True, ignored=True)
        if any(
            (
                st.modified,
                st.added,
                st.removed,
                st.deleted,
                st.unknown,
                st.ignored,
            )
        ):
            raise error.Abort(
                _(b'working copy of .hgtags is changed'),
                hint=_(b'please commit .hgtags manually'),
            )

    with repo.wlock():
        repo.tags()  # instantiate the cache
        _tag(repo, names, node, message, local, user, date, editor=editor)
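

# Illustrative sketch, not part of tags.py: tagging the current tip once as
# a committed tag and once as a local tag. `repo` is assumed to be a live
# localrepository; the user and message values are invented.
def _example_tag_usage(repo):
    node = repo[b'tip'].node()
    # Committed tag: writes .hgtags and commits the change.
    tag(
        repo,
        b'v1.0',  # a single name may be passed as plain bytes
        node,
        b'Added tag v1.0 for tip',
        False,  # local=False
        b'editor <editor@example.com>',
        None,  # date=None lets the commit pick the current time
    )
    # Local tag: stored in .hg/localtags, never committed.
    tag(repo, b'wip', node, b'', True, b'editor', None)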


def _tag(
    repo, names, node, message, local, user, date, extra=None, editor=False
):
    if isinstance(names, bytes):
        names = (names,)

    branches = repo.branchmap()
    for name in names:
        repo.hook(b'pretag', throw=True, node=hex(node), tag=name, local=local)
        if name in branches:
            repo.ui.warn(
                _(b"warning: tag %s conflicts with existing branch name\n")
                % name
            )

    def writetags(fp, names, munge, prevtags):
        fp.seek(0, io.SEEK_END)
        if prevtags and not prevtags.endswith(b'\n'):
            fp.write(b'\n')
        for name in names:
            if munge:
                m = munge(name)
            else:
                m = name

            if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
                old = repo.tags().get(name, repo.nullid)
                fp.write(b'%s %s\n' % (hex(old), m))
            fp.write(b'%s %s\n' % (hex(node), m))
        fp.close()

    prevtags = b''
    if local:
        try:
            fp = repo.vfs(b'localtags', b'r+')
        except IOError:
            fp = repo.vfs(b'localtags', b'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            repo.hook(b'tag', node=hex(node), tag=name, local=local)
        return

    try:
        fp = repo.wvfs(b'.hgtags', b'rb+')
    except FileNotFoundError:
        fp = repo.wvfs(b'.hgtags', b'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    fp.close()

    repo.invalidatecaches()

    with repo.dirstate.changing_files(repo):
        if b'.hgtags' not in repo.dirstate:
            repo[None].add([b'.hgtags'])

    m = matchmod.exact([b'.hgtags'])
    tagnode = repo.commit(
        message, user, date, extra=extra, match=m, editor=editor
    )

    for name in names:
        repo.hook(b'tag', node=hex(node), tag=name, local=local)

    return tagnode
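

# Illustrative sketch, not part of tags.py: the line format writetags()
# appends. Each entry is "<40-char hex node> <name>"; moving an existing
# tag first records the old node so the tag's history is preserved. The
# node values are invented.
_EXAMPLE_HGTAGS = (
    b'0123456789abcdef0123456789abcdef01234567 v1.0\n'  # old node for v1.0
    b'89abcdef0123456789abcdef0123456789abcdef v1.0\n'  # v1.0 re-tagged
)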


_fnodescachefile = b'hgtagsfnodes1'
_fnodesrecsize = 4 + 20  # changeset fragment + filenode
_fnodesmissingrec = b'\xff' * 24
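

# Illustrative sketch, not part of tags.py: splitting one raw record of the
# hgtagsfnodes1 cache using the constants above.
def _example_split_record(raw, rev):
    offset = rev * _fnodesrecsize
    record = bytes(raw[offset : offset + _fnodesrecsize])
    if record == _fnodesmissingrec:
        return None  # no entry has been computed for this revision yet
    # 4-byte changeset-node fragment, then the 20-byte .hgtags filenode.
    return record[:4], record[4:]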


class hgtagsfnodescache:
    """Persistent cache mapping revisions to .hgtags filenodes.

    The cache is an array of records. Each item in the array corresponds to
    a changelog revision. Values in the array contain the first 4 bytes of
    the node hash and the 20-byte .hgtags filenode for that revision.

    The first 4 bytes are present as a form of verification. Repository
    stripping and rewriting may change the node at a numeric revision in the
    changelog. The changeset fragment serves as a verifier to detect
    rewriting. This logic is shared with the rev branch cache (see
    branchmap.py).

    The instance holds in memory the full cache content but entries are
    only parsed on read.

    Instances behave like lists. ``c[i]`` works where i is a rev or
    changeset node. Missing indexes are populated automatically on access.
    """

    def __init__(self, repo):
        assert repo.filtername is None

        self._repo = repo

        # Only for reporting purposes.
        self.lookupcount = 0
        self.hitcount = 0

        try:
            data = repo.cachevfs.read(_fnodescachefile)
        except (OSError, IOError):
            data = b""
        self._raw = bytearray(data)

        # The end state of self._raw is an array that is of the exact length
        # required to hold a record for every revision in the repository.
        # We truncate or extend the array as necessary. self._dirtyoffset is
        # defined to be the start offset at which we need to write the output
        # file. This offset is also adjusted when new entries are calculated
        # for array members.
        cllen = len(repo.changelog)
        wantedlen = cllen * _fnodesrecsize
        rawlen = len(self._raw)

        self._dirtyoffset = None

        rawlentokeep = min(
            wantedlen, (rawlen // _fnodesrecsize) * _fnodesrecsize
        )
        if rawlen > rawlentokeep:
            # There's no easy way to truncate array instances. This seems
            # slightly less evil than copying a potentially large array slice.
            for i in range(rawlen - rawlentokeep):
                self._raw.pop()
            rawlen = len(self._raw)
            self._dirtyoffset = rawlen
        if rawlen < wantedlen:
            if self._dirtyoffset is None:
                self._dirtyoffset = rawlen
            # TODO: zero fill entire record, because it's invalid not missing?
            self._raw.extend(b'\xff' * (wantedlen - rawlen))
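
    # Worked example (editor's illustration, not part of tags.py): with 10
    # revisions, wantedlen is 10 * 24 == 240 bytes. A 250-byte cache file
    # keeps (250 // 24) * 24 == 240 bytes, dropping the torn trailing
    # record; a 96-byte file (4 complete records) is padded with b'\xff'
    # up to 240 bytes and _dirtyoffset becomes 96, so only the new tail is
    # written back.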

    def getfnode(self, node, computemissing=True):
        """Obtain the filenode of the .hgtags file at a specified revision.

        If the value is in the cache, the entry will be validated and
        returned. Otherwise, the filenode will be computed and returned
        unless "computemissing" is False. In that case, None will be returned
        if the entry is missing or False if the entry is invalid without
        any potentially expensive computation being performed.

        If an .hgtags does not exist at the specified revision, nullid is
        returned.
        """
        if node == self._repo.nullid:
            return node

        rev = self._repo.changelog.rev(node)

        self.lookupcount += 1

        offset = rev * _fnodesrecsize
        record = b'%s' % self._raw[offset : offset + _fnodesrecsize]
        properprefix = node[0:4]

        # Validate and return existing entry.
        if record != _fnodesmissingrec and len(record) == _fnodesrecsize:
            fileprefix = record[0:4]

            if fileprefix == properprefix:
                self.hitcount += 1
                return record[4:]

            # Fall through.

        # If we get here, the entry is either missing or invalid.

        if not computemissing:
            if record != _fnodesmissingrec:
                return False
            return None

        fnode = self._computefnode(node)
        self._writeentry(offset, properprefix, fnode)
        return fnode
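
    # Illustrative sketch, not part of tags.py: the possible outcomes of a
    # computemissing=False probe, written as an example-only helper.
    def _example_probe(self, node):
        probe = self.getfnode(node, computemissing=False)
        if probe is None:
            return b'missing'  # no entry was ever computed for this rev
        if probe is False:
            return b'invalid'  # stale entry; the node prefix mismatched
        if probe == self._repo.nullid:
            return b'no .hgtags file at this revision'
        return probe  # the cached 20-byte .hgtags filenode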

    def _computefnode(self, node):
        """Find the .hgtags filenode for a node whose cache entry is missing
        or invalid"""
        ctx = self._repo[node]
        rev = ctx.rev()
        fnode = None
        cl = self._repo.changelog
        p1rev, p2rev = cl._uncheckedparentrevs(rev)
        p1node = cl.node(p1rev)
        p1fnode = self.getfnode(p1node, computemissing=False)
        if p2rev != nullrev:
            # There are some non-merge changesets where p1 is null and p2
            # is set. Processing them as merges is just slower, but still
            # gives a good result.
            p2node = cl.node(p2rev)
            p2fnode = self.getfnode(p2node, computemissing=False)
            if p1fnode != p2fnode:
                # we cannot rely on readfast because we don't know against
                # what parent the readfast delta is computed
                p1fnode = None
        if p1fnode:
            mctx = ctx.manifestctx()
            fnode = mctx.readfast().get(b'.hgtags')
            if fnode is None:
                fnode = p1fnode
        if fnode is None:
            # Populate missing entry.
            try:
                fnode = ctx.filenode(b'.hgtags')
            except error.LookupError:
                # No .hgtags file on this revision.
                fnode = self._repo.nullid
        return fnode

    def setfnode(self, node, fnode):
        """Set the .hgtags filenode for a given changeset."""
        assert len(fnode) == 20
        ctx = self._repo[node]

        # Do a lookup first to avoid writing if nothing has changed.
        if self.getfnode(ctx.node(), computemissing=False) == fnode:
            return

        self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)

    def refresh_invalid_nodes(self, nodes):
        """Recompute the filenodes for a given set of nodes whose cached
        filenodes are unknown.

        Also updates the in-memory cache with the correct filenode.
        Callers need to take care of calling `.write()` so that updates are
        persisted.

        Returns a map {node: recomputed fnode}.
        """
        fixed_nodemap = {}
        for node in nodes:
            fnode = self._computefnode(node)
            fixed_nodemap[node] = fnode
            self.setfnode(node, fnode)
        return fixed_nodemap
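
    # Illustrative sketch, not part of tags.py: repairing entries that
    # failed validation and persisting the result, mirroring the caller in
    # _getfnodes() above.
    def _example_refresh(self, bad_nodes):
        fixed = self.refresh_invalid_nodes(bad_nodes)
        self.write()  # refresh_invalid_nodes() leaves persisting to us
        return {n: f for n, f in fixed.items() if f != self._repo.nullid}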

    def _writeentry(self, offset, prefix, fnode):
        # Slices on array instances only accept other array.
        entry = bytearray(prefix + fnode)
        self._raw[offset : offset + _fnodesrecsize] = entry
        # self._dirtyoffset could be None.
        self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0)
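
    # Worked example (editor's illustration, not part of tags.py): with
    # _dirtyoffset at 96 after __init__ extended the array, writing rev 2
    # (offset 2 * 24 == 48) lowers it to min(96, 48) == 48, so write()
    # flushes from byte 48 onward. On a clean cache (_dirtyoffset is None)
    # the `or 0` makes any write conservatively dirty from offset 0.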

    def write(self):
        """Perform all necessary writes to cache file.

        This may no-op if no writes are needed or if a write lock could
        not be obtained.
        """
        if self._dirtyoffset is None:
            return

        data = self._raw[self._dirtyoffset :]
        if not data:
            return

        repo = self._repo

        try:
            lock = repo.lock(wait=False)
        except error.LockError:
            repo.ui.log(
                b'tagscache',
                b'not writing .hg/cache/%s because '
                b'lock cannot be acquired\n' % _fnodescachefile,
            )
            return

        try:
            f = repo.cachevfs.open(_fnodescachefile, b'ab')
            try:
                # if the file has been truncated
                actualoffset = f.tell()
                if actualoffset < self._dirtyoffset:
                    self._dirtyoffset = actualoffset
                    data = self._raw[self._dirtyoffset :]
                f.seek(self._dirtyoffset)
                f.truncate()
                repo.ui.log(
                    b'tagscache',
                    b'writing %d bytes to cache/%s\n'
                    % (len(data), _fnodescachefile),
                )
                f.write(data)
                self._dirtyoffset = None
            finally:
                f.close()
        except (IOError, OSError) as inst:
            repo.ui.log(
                b'tagscache',
                b"couldn't write cache/%s: %s\n"
                % (_fnodescachefile, stringutil.forcebytestr(inst)),
            )
        finally:
            lock.release()


def clear_cache_on_disk(repo):
    """function used by the perf extension to clear the "tags" cache on disk"""
    repo.cachevfs.tryunlink(_filename(repo))


def clear_cache_fnodes(repo):
    """function used by the perf extension to clear the "file node" cache"""
    # The fnodes cache lives in its own file (_fnodescachefile), not in the
    # tags cache file returned by _filename(repo).
    repo.cachevfs.tryunlink(_fnodescachefile)


def forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    missing_1 = b'\xff' * 4
    missing_2 = b'\xff' * 20
    cache = hgtagsfnodescache(repo.unfiltered())
    for r in revs:
        cache._writeentry(r * _fnodesrecsize, missing_1, missing_2)
    cache.write()
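

# Illustrative sketch, not part of tags.py: invalidating the fnodes-cache
# entries for a few revisions so they are recomputed on the next tags read.
# `repo` is assumed to be a live localrepository.
def _example_forget(repo):
    revs = [0, len(repo.changelog) - 1]  # first and last revision
    forget_fnodes(repo, revs)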