##// END OF EJS Templates
perf: move cache clearing in the `setup` step of `perfheads`...
Boris Feld -
r41481:ab6d1f82 default
parent child Browse files
Show More
@@ -1,2674 +1,2675 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 # "historical portability" policy of perf.py:
4 # "historical portability" policy of perf.py:
5 #
5 #
6 # We have to do:
6 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
10 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
11 # version as possible
12 #
12 #
13 # We have to do, if possible with reasonable cost:
13 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
14 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
15 # with early Mercurial
16 #
16 #
17 # We don't have to do:
17 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
18 # - make perf command for recent feature work correctly with early
19 # Mercurial
19 # Mercurial
20
20
21 from __future__ import absolute_import
21 from __future__ import absolute_import
22 import contextlib
22 import contextlib
23 import functools
23 import functools
24 import gc
24 import gc
25 import os
25 import os
26 import random
26 import random
27 import shutil
27 import shutil
28 import struct
28 import struct
29 import sys
29 import sys
30 import tempfile
30 import tempfile
31 import threading
31 import threading
32 import time
32 import time
33 from mercurial import (
33 from mercurial import (
34 changegroup,
34 changegroup,
35 cmdutil,
35 cmdutil,
36 commands,
36 commands,
37 copies,
37 copies,
38 error,
38 error,
39 extensions,
39 extensions,
40 hg,
40 hg,
41 mdiff,
41 mdiff,
42 merge,
42 merge,
43 revlog,
43 revlog,
44 util,
44 util,
45 )
45 )
46
46
# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
except ImportError:
    pass
def identity(a):
    """Return *a* unchanged.

    Used as a no-op fallback for pycompat helpers (byteskwargs, fsencode)
    when running against a Mercurial too old to provide them.
    """
    return a
79
79
# for "historical portability":
# pycompat (and the helpers on it) only exist on newer Mercurial; fall
# back to py2-only equivalents when any attribute is missing.
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
118
118
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (bytes or str name)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # NOTE: os.name is a native str on both py2 and py3; the previous
    # bytes comparison (b'nt') could never match on py3, so Windows
    # silently fell through to the low-resolution time.time.
    util.timer = time.clock
else:
    util.timer = time.time
136
136
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        (b'c', b'changelog', False, (b'open changelog')),
        (b'm', b'manifest', False, (b'open manifest')),
        (b'', b'dir', False, (b'open directory manifest')),
        ]))

# table that @command fills with the perf commands defined below
cmdtable = {}
158
158
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like b"name|alias1|alias2" into a name list."""
    return cmd.split(b"|")
164
164
# Pick the most capable @command decorator this Mercurial offers,
# wrapping or re-implementing it when options are missing.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
192
192
# Register the config knobs this extension reads; silently skip on
# Mercurial too old to have the registrar/configitems machinery.
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
212
212
def getlen(ui):
    """Return a length function for measured results.

    When the experimental perf.stub config is set, return a stub that
    always reports 1 so test runs stay fast and deterministic.
    """
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
217
217
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
283
283
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after optional *setup*) without timing.

    Drop-in replacement for _timer when perf.stub is set; *fm* and
    *title* are accepted only for signature compatibility.
    """
    if setup is not None:
        setup()
    func()
288
288
@contextlib.contextmanager
def timeone():
    """Context manager measuring one run.

    Yields a list into which a single (wallclock, user-cpu, sys-cpu)
    tuple is appended once the with-block exits.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() indices 0/1 are user and system CPU time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
299
299
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly time *func* and report the results through *fm*.

    *setup*, when given, runs before every timed iteration (and is not
    included in the measurement). Iterates until either 3 seconds and
    100 runs, or 10 seconds and 3 runs, have elapsed.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
320
320
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Format a list of (wall, user, sys) *timings* through formatter *fm*.

    Always reports the best run; with *displayall* also reports max,
    average and median. NOTE: sorts *timings* in place.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
352
352
# utilities for historical portability

def getint(ui, section, name, default):
    """Read config *section*.*name* as an int, or *default* when unset.

    Raises error.ConfigError when the value is set but not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))
366
366
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
396
396
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    """Return the subsettable mapping used by branchmap, wherever it lives."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
414
414
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        # pre-2.3 repositories expose the store opener as 'sopener'
        return getattr(repo, 'sopener')
425
425
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        # pre-2.3 repositories expose the working vfs as 'opener'
        return getattr(repo, 'opener')
436
436
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
465
465
# utilities to clear cache

def clearfilecache(obj, attrname):
    """Drop *attrname* from *obj*'s filecache (unfiltered repo when possible).

    Removes both the materialized attribute and its _filecache entry so
    the next access recomputes it from disk.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
475
475
def clearchangelog(repo):
    """Drop all cached changelog state so it is reloaded on next access.

    On a filtered repoview the cached changelog and its key live on the
    view itself and must be reset through object.__setattr__ (plain
    setattr is intercepted by the repoview proxy).
    """
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
481
481
# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """Benchmark a full dirstate walk over the working directory."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
492
492
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """Benchmark annotating file *f* at the working directory parent."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
500
500
501 @command(b'perfstatus',
501 @command(b'perfstatus',
502 [(b'u', b'unknown', False,
502 [(b'u', b'unknown', False,
503 b'ask status to look for unknown files')] + formatteropts)
503 b'ask status to look for unknown files')] + formatteropts)
504 def perfstatus(ui, repo, **opts):
504 def perfstatus(ui, repo, **opts):
505 opts = _byteskwargs(opts)
505 opts = _byteskwargs(opts)
506 #m = match.always(repo.root, repo.getcwd())
506 #m = match.always(repo.root, repo.getcwd())
507 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
507 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
508 # False))))
508 # False))))
509 timer, fm = gettimer(ui, opts)
509 timer, fm = gettimer(ui, opts)
510 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
510 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
511 fm.end()
511 fm.end()
512
512
513 @command(b'perfaddremove', formatteropts)
513 @command(b'perfaddremove', formatteropts)
514 def perfaddremove(ui, repo, **opts):
514 def perfaddremove(ui, repo, **opts):
515 opts = _byteskwargs(opts)
515 opts = _byteskwargs(opts)
516 timer, fm = gettimer(ui, opts)
516 timer, fm = gettimer(ui, opts)
517 try:
517 try:
518 oldquiet = repo.ui.quiet
518 oldquiet = repo.ui.quiet
519 repo.ui.quiet = True
519 repo.ui.quiet = True
520 matcher = scmutil.match(repo[None])
520 matcher = scmutil.match(repo[None])
521 opts[b'dry_run'] = True
521 opts[b'dry_run'] = True
522 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
522 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
523 finally:
523 finally:
524 repo.ui.quiet = oldquiet
524 repo.ui.quiet = oldquiet
525 fm.end()
525 fm.end()
526
526
527 def clearcaches(cl):
527 def clearcaches(cl):
528 # behave somewhat consistently across internal API changes
528 # behave somewhat consistently across internal API changes
529 if util.safehasattr(cl, b'clearcaches'):
529 if util.safehasattr(cl, b'clearcaches'):
530 cl.clearcaches()
530 cl.clearcaches()
531 elif util.safehasattr(cl, b'_nodecache'):
531 elif util.safehasattr(cl, b'_nodecache'):
532 from mercurial.node import nullid, nullrev
532 from mercurial.node import nullid, nullrev
533 cl._nodecache = {nullid: nullrev}
533 cl._nodecache = {nullid: nullrev}
534 cl._nodepos = None
534 cl._nodepos = None
535
535
536 @command(b'perfheads', formatteropts)
536 @command(b'perfheads', formatteropts)
537 def perfheads(ui, repo, **opts):
537 def perfheads(ui, repo, **opts):
538 """benchmark the computation of a changelog heads"""
538 """benchmark the computation of a changelog heads"""
539 opts = _byteskwargs(opts)
539 opts = _byteskwargs(opts)
540 timer, fm = gettimer(ui, opts)
540 timer, fm = gettimer(ui, opts)
541 cl = repo.changelog
541 cl = repo.changelog
542 def s():
543 clearcaches(cl)
542 def d():
544 def d():
543 len(cl.headrevs())
545 len(cl.headrevs())
544 clearcaches(cl)
546 timer(d, setup=s)
545 timer(d)
546 fm.end()
547 fm.end()
547
548
548 @command(b'perftags', formatteropts+
549 @command(b'perftags', formatteropts+
549 [
550 [
550 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
551 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
551 ])
552 ])
552 def perftags(ui, repo, **opts):
553 def perftags(ui, repo, **opts):
553 opts = _byteskwargs(opts)
554 opts = _byteskwargs(opts)
554 timer, fm = gettimer(ui, opts)
555 timer, fm = gettimer(ui, opts)
555 repocleartagscache = repocleartagscachefunc(repo)
556 repocleartagscache = repocleartagscachefunc(repo)
556 clearrevlogs = opts[b'clear_revlogs']
557 clearrevlogs = opts[b'clear_revlogs']
557 def s():
558 def s():
558 if clearrevlogs:
559 if clearrevlogs:
559 clearchangelog(repo)
560 clearchangelog(repo)
560 clearfilecache(repo.unfiltered(), 'manifest')
561 clearfilecache(repo.unfiltered(), 'manifest')
561 repocleartagscache()
562 repocleartagscache()
562 def t():
563 def t():
563 return len(repo.tags())
564 return len(repo.tags())
564 timer(t, setup=s)
565 timer(t, setup=s)
565 fm.end()
566 fm.end()
566
567
567 @command(b'perfancestors', formatteropts)
568 @command(b'perfancestors', formatteropts)
568 def perfancestors(ui, repo, **opts):
569 def perfancestors(ui, repo, **opts):
569 opts = _byteskwargs(opts)
570 opts = _byteskwargs(opts)
570 timer, fm = gettimer(ui, opts)
571 timer, fm = gettimer(ui, opts)
571 heads = repo.changelog.headrevs()
572 heads = repo.changelog.headrevs()
572 def d():
573 def d():
573 for a in repo.changelog.ancestors(heads):
574 for a in repo.changelog.ancestors(heads):
574 pass
575 pass
575 timer(d)
576 timer(d)
576 fm.end()
577 fm.end()
577
578
578 @command(b'perfancestorset', formatteropts)
579 @command(b'perfancestorset', formatteropts)
579 def perfancestorset(ui, repo, revset, **opts):
580 def perfancestorset(ui, repo, revset, **opts):
580 opts = _byteskwargs(opts)
581 opts = _byteskwargs(opts)
581 timer, fm = gettimer(ui, opts)
582 timer, fm = gettimer(ui, opts)
582 revs = repo.revs(revset)
583 revs = repo.revs(revset)
583 heads = repo.changelog.headrevs()
584 heads = repo.changelog.headrevs()
584 def d():
585 def d():
585 s = repo.changelog.ancestors(heads)
586 s = repo.changelog.ancestors(heads)
586 for rev in revs:
587 for rev in revs:
587 rev in s
588 rev in s
588 timer(d)
589 timer(d)
589 fm.end()
590 fm.end()
590
591
591 @command(b'perfdiscovery', formatteropts, b'PATH')
592 @command(b'perfdiscovery', formatteropts, b'PATH')
592 def perfdiscovery(ui, repo, path, **opts):
593 def perfdiscovery(ui, repo, path, **opts):
593 """benchmark discovery between local repo and the peer at given path
594 """benchmark discovery between local repo and the peer at given path
594 """
595 """
595 repos = [repo, None]
596 repos = [repo, None]
596 timer, fm = gettimer(ui, opts)
597 timer, fm = gettimer(ui, opts)
597 path = ui.expandpath(path)
598 path = ui.expandpath(path)
598
599
599 def s():
600 def s():
600 repos[1] = hg.peer(ui, opts, path)
601 repos[1] = hg.peer(ui, opts, path)
601 def d():
602 def d():
602 setdiscovery.findcommonheads(ui, *repos)
603 setdiscovery.findcommonheads(ui, *repos)
603 timer(d, setup=s)
604 timer(d, setup=s)
604 fm.end()
605 fm.end()
605
606
606 @command(b'perfbookmarks', formatteropts +
607 @command(b'perfbookmarks', formatteropts +
607 [
608 [
608 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
609 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
609 ])
610 ])
610 def perfbookmarks(ui, repo, **opts):
611 def perfbookmarks(ui, repo, **opts):
611 """benchmark parsing bookmarks from disk to memory"""
612 """benchmark parsing bookmarks from disk to memory"""
612 opts = _byteskwargs(opts)
613 opts = _byteskwargs(opts)
613 timer, fm = gettimer(ui, opts)
614 timer, fm = gettimer(ui, opts)
614
615
615 clearrevlogs = opts[b'clear_revlogs']
616 clearrevlogs = opts[b'clear_revlogs']
616 def s():
617 def s():
617 if clearrevlogs:
618 if clearrevlogs:
618 clearchangelog(repo)
619 clearchangelog(repo)
619 clearfilecache(repo, b'_bookmarks')
620 clearfilecache(repo, b'_bookmarks')
620 def d():
621 def d():
621 repo._bookmarks
622 repo._bookmarks
622 timer(d, setup=s)
623 timer(d, setup=s)
623 fm.end()
624 fm.end()
624
625
625 @command(b'perfbundleread', formatteropts, b'BUNDLE')
626 @command(b'perfbundleread', formatteropts, b'BUNDLE')
626 def perfbundleread(ui, repo, bundlepath, **opts):
627 def perfbundleread(ui, repo, bundlepath, **opts):
627 """Benchmark reading of bundle files.
628 """Benchmark reading of bundle files.
628
629
629 This command is meant to isolate the I/O part of bundle reading as
630 This command is meant to isolate the I/O part of bundle reading as
630 much as possible.
631 much as possible.
631 """
632 """
632 from mercurial import (
633 from mercurial import (
633 bundle2,
634 bundle2,
634 exchange,
635 exchange,
635 streamclone,
636 streamclone,
636 )
637 )
637
638
638 opts = _byteskwargs(opts)
639 opts = _byteskwargs(opts)
639
640
640 def makebench(fn):
641 def makebench(fn):
641 def run():
642 def run():
642 with open(bundlepath, b'rb') as fh:
643 with open(bundlepath, b'rb') as fh:
643 bundle = exchange.readbundle(ui, fh, bundlepath)
644 bundle = exchange.readbundle(ui, fh, bundlepath)
644 fn(bundle)
645 fn(bundle)
645
646
646 return run
647 return run
647
648
648 def makereadnbytes(size):
649 def makereadnbytes(size):
649 def run():
650 def run():
650 with open(bundlepath, b'rb') as fh:
651 with open(bundlepath, b'rb') as fh:
651 bundle = exchange.readbundle(ui, fh, bundlepath)
652 bundle = exchange.readbundle(ui, fh, bundlepath)
652 while bundle.read(size):
653 while bundle.read(size):
653 pass
654 pass
654
655
655 return run
656 return run
656
657
657 def makestdioread(size):
658 def makestdioread(size):
658 def run():
659 def run():
659 with open(bundlepath, b'rb') as fh:
660 with open(bundlepath, b'rb') as fh:
660 while fh.read(size):
661 while fh.read(size):
661 pass
662 pass
662
663
663 return run
664 return run
664
665
665 # bundle1
666 # bundle1
666
667
667 def deltaiter(bundle):
668 def deltaiter(bundle):
668 for delta in bundle.deltaiter():
669 for delta in bundle.deltaiter():
669 pass
670 pass
670
671
671 def iterchunks(bundle):
672 def iterchunks(bundle):
672 for chunk in bundle.getchunks():
673 for chunk in bundle.getchunks():
673 pass
674 pass
674
675
675 # bundle2
676 # bundle2
676
677
677 def forwardchunks(bundle):
678 def forwardchunks(bundle):
678 for chunk in bundle._forwardchunks():
679 for chunk in bundle._forwardchunks():
679 pass
680 pass
680
681
681 def iterparts(bundle):
682 def iterparts(bundle):
682 for part in bundle.iterparts():
683 for part in bundle.iterparts():
683 pass
684 pass
684
685
685 def iterpartsseekable(bundle):
686 def iterpartsseekable(bundle):
686 for part in bundle.iterparts(seekable=True):
687 for part in bundle.iterparts(seekable=True):
687 pass
688 pass
688
689
689 def seek(bundle):
690 def seek(bundle):
690 for part in bundle.iterparts(seekable=True):
691 for part in bundle.iterparts(seekable=True):
691 part.seek(0, os.SEEK_END)
692 part.seek(0, os.SEEK_END)
692
693
693 def makepartreadnbytes(size):
694 def makepartreadnbytes(size):
694 def run():
695 def run():
695 with open(bundlepath, b'rb') as fh:
696 with open(bundlepath, b'rb') as fh:
696 bundle = exchange.readbundle(ui, fh, bundlepath)
697 bundle = exchange.readbundle(ui, fh, bundlepath)
697 for part in bundle.iterparts():
698 for part in bundle.iterparts():
698 while part.read(size):
699 while part.read(size):
699 pass
700 pass
700
701
701 return run
702 return run
702
703
703 benches = [
704 benches = [
704 (makestdioread(8192), b'read(8k)'),
705 (makestdioread(8192), b'read(8k)'),
705 (makestdioread(16384), b'read(16k)'),
706 (makestdioread(16384), b'read(16k)'),
706 (makestdioread(32768), b'read(32k)'),
707 (makestdioread(32768), b'read(32k)'),
707 (makestdioread(131072), b'read(128k)'),
708 (makestdioread(131072), b'read(128k)'),
708 ]
709 ]
709
710
710 with open(bundlepath, b'rb') as fh:
711 with open(bundlepath, b'rb') as fh:
711 bundle = exchange.readbundle(ui, fh, bundlepath)
712 bundle = exchange.readbundle(ui, fh, bundlepath)
712
713
713 if isinstance(bundle, changegroup.cg1unpacker):
714 if isinstance(bundle, changegroup.cg1unpacker):
714 benches.extend([
715 benches.extend([
715 (makebench(deltaiter), b'cg1 deltaiter()'),
716 (makebench(deltaiter), b'cg1 deltaiter()'),
716 (makebench(iterchunks), b'cg1 getchunks()'),
717 (makebench(iterchunks), b'cg1 getchunks()'),
717 (makereadnbytes(8192), b'cg1 read(8k)'),
718 (makereadnbytes(8192), b'cg1 read(8k)'),
718 (makereadnbytes(16384), b'cg1 read(16k)'),
719 (makereadnbytes(16384), b'cg1 read(16k)'),
719 (makereadnbytes(32768), b'cg1 read(32k)'),
720 (makereadnbytes(32768), b'cg1 read(32k)'),
720 (makereadnbytes(131072), b'cg1 read(128k)'),
721 (makereadnbytes(131072), b'cg1 read(128k)'),
721 ])
722 ])
722 elif isinstance(bundle, bundle2.unbundle20):
723 elif isinstance(bundle, bundle2.unbundle20):
723 benches.extend([
724 benches.extend([
724 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
725 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
725 (makebench(iterparts), b'bundle2 iterparts()'),
726 (makebench(iterparts), b'bundle2 iterparts()'),
726 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
727 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
727 (makebench(seek), b'bundle2 part seek()'),
728 (makebench(seek), b'bundle2 part seek()'),
728 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
729 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
729 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
730 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
730 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
731 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
731 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
732 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
732 ])
733 ])
733 elif isinstance(bundle, streamclone.streamcloneapplier):
734 elif isinstance(bundle, streamclone.streamcloneapplier):
734 raise error.Abort(b'stream clone bundles not supported')
735 raise error.Abort(b'stream clone bundles not supported')
735 else:
736 else:
736 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
737 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
737
738
738 for fn, title in benches:
739 for fn, title in benches:
739 timer, fm = gettimer(ui, opts)
740 timer, fm = gettimer(ui, opts)
740 timer(fn, title=title)
741 timer(fn, title=title)
741 fm.end()
742 fm.end()
742
743
743 @command(b'perfchangegroupchangelog', formatteropts +
744 @command(b'perfchangegroupchangelog', formatteropts +
744 [(b'', b'cgversion', b'02', b'changegroup version'),
745 [(b'', b'cgversion', b'02', b'changegroup version'),
745 (b'r', b'rev', b'', b'revisions to add to changegroup')])
746 (b'r', b'rev', b'', b'revisions to add to changegroup')])
746 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
747 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
747 """Benchmark producing a changelog group for a changegroup.
748 """Benchmark producing a changelog group for a changegroup.
748
749
749 This measures the time spent processing the changelog during a
750 This measures the time spent processing the changelog during a
750 bundle operation. This occurs during `hg bundle` and on a server
751 bundle operation. This occurs during `hg bundle` and on a server
751 processing a `getbundle` wire protocol request (handles clones
752 processing a `getbundle` wire protocol request (handles clones
752 and pull requests).
753 and pull requests).
753
754
754 By default, all revisions are added to the changegroup.
755 By default, all revisions are added to the changegroup.
755 """
756 """
756 opts = _byteskwargs(opts)
757 opts = _byteskwargs(opts)
757 cl = repo.changelog
758 cl = repo.changelog
758 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
759 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
759 bundler = changegroup.getbundler(cgversion, repo)
760 bundler = changegroup.getbundler(cgversion, repo)
760
761
761 def d():
762 def d():
762 state, chunks = bundler._generatechangelog(cl, nodes)
763 state, chunks = bundler._generatechangelog(cl, nodes)
763 for chunk in chunks:
764 for chunk in chunks:
764 pass
765 pass
765
766
766 timer, fm = gettimer(ui, opts)
767 timer, fm = gettimer(ui, opts)
767
768
768 # Terminal printing can interfere with timing. So disable it.
769 # Terminal printing can interfere with timing. So disable it.
769 with ui.configoverride({(b'progress', b'disable'): True}):
770 with ui.configoverride({(b'progress', b'disable'): True}):
770 timer(d)
771 timer(d)
771
772
772 fm.end()
773 fm.end()
773
774
774 @command(b'perfdirs', formatteropts)
775 @command(b'perfdirs', formatteropts)
775 def perfdirs(ui, repo, **opts):
776 def perfdirs(ui, repo, **opts):
776 opts = _byteskwargs(opts)
777 opts = _byteskwargs(opts)
777 timer, fm = gettimer(ui, opts)
778 timer, fm = gettimer(ui, opts)
778 dirstate = repo.dirstate
779 dirstate = repo.dirstate
779 b'a' in dirstate
780 b'a' in dirstate
780 def d():
781 def d():
781 dirstate.hasdir(b'a')
782 dirstate.hasdir(b'a')
782 del dirstate._map._dirs
783 del dirstate._map._dirs
783 timer(d)
784 timer(d)
784 fm.end()
785 fm.end()
785
786
786 @command(b'perfdirstate', formatteropts)
787 @command(b'perfdirstate', formatteropts)
787 def perfdirstate(ui, repo, **opts):
788 def perfdirstate(ui, repo, **opts):
788 opts = _byteskwargs(opts)
789 opts = _byteskwargs(opts)
789 timer, fm = gettimer(ui, opts)
790 timer, fm = gettimer(ui, opts)
790 b"a" in repo.dirstate
791 b"a" in repo.dirstate
791 def d():
792 def d():
792 repo.dirstate.invalidate()
793 repo.dirstate.invalidate()
793 b"a" in repo.dirstate
794 b"a" in repo.dirstate
794 timer(d)
795 timer(d)
795 fm.end()
796 fm.end()
796
797
797 @command(b'perfdirstatedirs', formatteropts)
798 @command(b'perfdirstatedirs', formatteropts)
798 def perfdirstatedirs(ui, repo, **opts):
799 def perfdirstatedirs(ui, repo, **opts):
799 opts = _byteskwargs(opts)
800 opts = _byteskwargs(opts)
800 timer, fm = gettimer(ui, opts)
801 timer, fm = gettimer(ui, opts)
801 b"a" in repo.dirstate
802 b"a" in repo.dirstate
802 def d():
803 def d():
803 repo.dirstate.hasdir(b"a")
804 repo.dirstate.hasdir(b"a")
804 del repo.dirstate._map._dirs
805 del repo.dirstate._map._dirs
805 timer(d)
806 timer(d)
806 fm.end()
807 fm.end()
807
808
808 @command(b'perfdirstatefoldmap', formatteropts)
809 @command(b'perfdirstatefoldmap', formatteropts)
809 def perfdirstatefoldmap(ui, repo, **opts):
810 def perfdirstatefoldmap(ui, repo, **opts):
810 opts = _byteskwargs(opts)
811 opts = _byteskwargs(opts)
811 timer, fm = gettimer(ui, opts)
812 timer, fm = gettimer(ui, opts)
812 dirstate = repo.dirstate
813 dirstate = repo.dirstate
813 b'a' in dirstate
814 b'a' in dirstate
814 def d():
815 def d():
815 dirstate._map.filefoldmap.get(b'a')
816 dirstate._map.filefoldmap.get(b'a')
816 del dirstate._map.filefoldmap
817 del dirstate._map.filefoldmap
817 timer(d)
818 timer(d)
818 fm.end()
819 fm.end()
819
820
820 @command(b'perfdirfoldmap', formatteropts)
821 @command(b'perfdirfoldmap', formatteropts)
821 def perfdirfoldmap(ui, repo, **opts):
822 def perfdirfoldmap(ui, repo, **opts):
822 opts = _byteskwargs(opts)
823 opts = _byteskwargs(opts)
823 timer, fm = gettimer(ui, opts)
824 timer, fm = gettimer(ui, opts)
824 dirstate = repo.dirstate
825 dirstate = repo.dirstate
825 b'a' in dirstate
826 b'a' in dirstate
826 def d():
827 def d():
827 dirstate._map.dirfoldmap.get(b'a')
828 dirstate._map.dirfoldmap.get(b'a')
828 del dirstate._map.dirfoldmap
829 del dirstate._map.dirfoldmap
829 del dirstate._map._dirs
830 del dirstate._map._dirs
830 timer(d)
831 timer(d)
831 fm.end()
832 fm.end()
832
833
833 @command(b'perfdirstatewrite', formatteropts)
834 @command(b'perfdirstatewrite', formatteropts)
834 def perfdirstatewrite(ui, repo, **opts):
835 def perfdirstatewrite(ui, repo, **opts):
835 opts = _byteskwargs(opts)
836 opts = _byteskwargs(opts)
836 timer, fm = gettimer(ui, opts)
837 timer, fm = gettimer(ui, opts)
837 ds = repo.dirstate
838 ds = repo.dirstate
838 b"a" in ds
839 b"a" in ds
839 def d():
840 def d():
840 ds._dirty = True
841 ds._dirty = True
841 ds.write(repo.currenttransaction())
842 ds.write(repo.currenttransaction())
842 timer(d)
843 timer(d)
843 fm.end()
844 fm.end()
844
845
845 @command(b'perfmergecalculate',
846 @command(b'perfmergecalculate',
846 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
847 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
847 def perfmergecalculate(ui, repo, rev, **opts):
848 def perfmergecalculate(ui, repo, rev, **opts):
848 opts = _byteskwargs(opts)
849 opts = _byteskwargs(opts)
849 timer, fm = gettimer(ui, opts)
850 timer, fm = gettimer(ui, opts)
850 wctx = repo[None]
851 wctx = repo[None]
851 rctx = scmutil.revsingle(repo, rev, rev)
852 rctx = scmutil.revsingle(repo, rev, rev)
852 ancestor = wctx.ancestor(rctx)
853 ancestor = wctx.ancestor(rctx)
853 # we don't want working dir files to be stat'd in the benchmark, so prime
854 # we don't want working dir files to be stat'd in the benchmark, so prime
854 # that cache
855 # that cache
855 wctx.dirty()
856 wctx.dirty()
856 def d():
857 def d():
857 # acceptremote is True because we don't want prompts in the middle of
858 # acceptremote is True because we don't want prompts in the middle of
858 # our benchmark
859 # our benchmark
859 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
860 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
860 acceptremote=True, followcopies=True)
861 acceptremote=True, followcopies=True)
861 timer(d)
862 timer(d)
862 fm.end()
863 fm.end()
863
864
864 @command(b'perfpathcopies', [], b"REV REV")
865 @command(b'perfpathcopies', [], b"REV REV")
865 def perfpathcopies(ui, repo, rev1, rev2, **opts):
866 def perfpathcopies(ui, repo, rev1, rev2, **opts):
866 """benchmark the copy tracing logic"""
867 """benchmark the copy tracing logic"""
867 opts = _byteskwargs(opts)
868 opts = _byteskwargs(opts)
868 timer, fm = gettimer(ui, opts)
869 timer, fm = gettimer(ui, opts)
869 ctx1 = scmutil.revsingle(repo, rev1, rev1)
870 ctx1 = scmutil.revsingle(repo, rev1, rev1)
870 ctx2 = scmutil.revsingle(repo, rev2, rev2)
871 ctx2 = scmutil.revsingle(repo, rev2, rev2)
871 def d():
872 def d():
872 copies.pathcopies(ctx1, ctx2)
873 copies.pathcopies(ctx1, ctx2)
873 timer(d)
874 timer(d)
874 fm.end()
875 fm.end()
875
876
876 @command(b'perfphases',
877 @command(b'perfphases',
877 [(b'', b'full', False, b'include file reading time too'),
878 [(b'', b'full', False, b'include file reading time too'),
878 ], b"")
879 ], b"")
879 def perfphases(ui, repo, **opts):
880 def perfphases(ui, repo, **opts):
880 """benchmark phasesets computation"""
881 """benchmark phasesets computation"""
881 opts = _byteskwargs(opts)
882 opts = _byteskwargs(opts)
882 timer, fm = gettimer(ui, opts)
883 timer, fm = gettimer(ui, opts)
883 _phases = repo._phasecache
884 _phases = repo._phasecache
884 full = opts.get(b'full')
885 full = opts.get(b'full')
885 def d():
886 def d():
886 phases = _phases
887 phases = _phases
887 if full:
888 if full:
888 clearfilecache(repo, b'_phasecache')
889 clearfilecache(repo, b'_phasecache')
889 phases = repo._phasecache
890 phases = repo._phasecache
890 phases.invalidate()
891 phases.invalidate()
891 phases.loadphaserevs(repo)
892 phases.loadphaserevs(repo)
892 timer(d)
893 timer(d)
893 fm.end()
894 fm.end()
894
895
895 @command(b'perfphasesremote',
896 @command(b'perfphasesremote',
896 [], b"[DEST]")
897 [], b"[DEST]")
897 def perfphasesremote(ui, repo, dest=None, **opts):
898 def perfphasesremote(ui, repo, dest=None, **opts):
898 """benchmark time needed to analyse phases of the remote server"""
899 """benchmark time needed to analyse phases of the remote server"""
899 from mercurial.node import (
900 from mercurial.node import (
900 bin,
901 bin,
901 )
902 )
902 from mercurial import (
903 from mercurial import (
903 exchange,
904 exchange,
904 hg,
905 hg,
905 phases,
906 phases,
906 )
907 )
907 opts = _byteskwargs(opts)
908 opts = _byteskwargs(opts)
908 timer, fm = gettimer(ui, opts)
909 timer, fm = gettimer(ui, opts)
909
910
910 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
911 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
911 if not path:
912 if not path:
912 raise error.Abort((b'default repository not configured!'),
913 raise error.Abort((b'default repository not configured!'),
913 hint=(b"see 'hg help config.paths'"))
914 hint=(b"see 'hg help config.paths'"))
914 dest = path.pushloc or path.loc
915 dest = path.pushloc or path.loc
915 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
916 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
916 other = hg.peer(repo, opts, dest)
917 other = hg.peer(repo, opts, dest)
917
918
918 # easier to perform discovery through the operation
919 # easier to perform discovery through the operation
919 op = exchange.pushoperation(repo, other)
920 op = exchange.pushoperation(repo, other)
920 exchange._pushdiscoverychangeset(op)
921 exchange._pushdiscoverychangeset(op)
921
922
922 remotesubset = op.fallbackheads
923 remotesubset = op.fallbackheads
923
924
924 with other.commandexecutor() as e:
925 with other.commandexecutor() as e:
925 remotephases = e.callcommand(b'listkeys',
926 remotephases = e.callcommand(b'listkeys',
926 {b'namespace': b'phases'}).result()
927 {b'namespace': b'phases'}).result()
927 del other
928 del other
928 publishing = remotephases.get(b'publishing', False)
929 publishing = remotephases.get(b'publishing', False)
929 if publishing:
930 if publishing:
930 ui.status((b'publishing: yes\n'))
931 ui.status((b'publishing: yes\n'))
931 else:
932 else:
932 ui.status((b'publishing: no\n'))
933 ui.status((b'publishing: no\n'))
933
934
934 nodemap = repo.changelog.nodemap
935 nodemap = repo.changelog.nodemap
935 nonpublishroots = 0
936 nonpublishroots = 0
936 for nhex, phase in remotephases.iteritems():
937 for nhex, phase in remotephases.iteritems():
937 if nhex == b'publishing': # ignore data related to publish option
938 if nhex == b'publishing': # ignore data related to publish option
938 continue
939 continue
939 node = bin(nhex)
940 node = bin(nhex)
940 if node in nodemap and int(phase):
941 if node in nodemap and int(phase):
941 nonpublishroots += 1
942 nonpublishroots += 1
942 ui.status((b'number of roots: %d\n') % len(remotephases))
943 ui.status((b'number of roots: %d\n') % len(remotephases))
943 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
944 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
944 def d():
945 def d():
945 phases.remotephasessummary(repo,
946 phases.remotephasessummary(repo,
946 remotesubset,
947 remotesubset,
947 remotephases)
948 remotephases)
948 timer(d)
949 timer(d)
949 fm.end()
950 fm.end()
950
951
951 @command(b'perfmanifest',[
952 @command(b'perfmanifest',[
952 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
953 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
953 (b'', b'clear-disk', False, b'clear on-disk caches too'),
954 (b'', b'clear-disk', False, b'clear on-disk caches too'),
954 ] + formatteropts, b'REV|NODE')
955 ] + formatteropts, b'REV|NODE')
955 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
956 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
956 """benchmark the time to read a manifest from disk and return a usable
957 """benchmark the time to read a manifest from disk and return a usable
957 dict-like object
958 dict-like object
958
959
959 Manifest caches are cleared before retrieval."""
960 Manifest caches are cleared before retrieval."""
960 opts = _byteskwargs(opts)
961 opts = _byteskwargs(opts)
961 timer, fm = gettimer(ui, opts)
962 timer, fm = gettimer(ui, opts)
962 if not manifest_rev:
963 if not manifest_rev:
963 ctx = scmutil.revsingle(repo, rev, rev)
964 ctx = scmutil.revsingle(repo, rev, rev)
964 t = ctx.manifestnode()
965 t = ctx.manifestnode()
965 else:
966 else:
966 from mercurial.node import bin
967 from mercurial.node import bin
967
968
968 if len(rev) == 40:
969 if len(rev) == 40:
969 t = bin(rev)
970 t = bin(rev)
970 else:
971 else:
971 try:
972 try:
972 rev = int(rev)
973 rev = int(rev)
973
974
974 if util.safehasattr(repo.manifestlog, b'getstorage'):
975 if util.safehasattr(repo.manifestlog, b'getstorage'):
975 t = repo.manifestlog.getstorage(b'').node(rev)
976 t = repo.manifestlog.getstorage(b'').node(rev)
976 else:
977 else:
977 t = repo.manifestlog._revlog.lookup(rev)
978 t = repo.manifestlog._revlog.lookup(rev)
978 except ValueError:
979 except ValueError:
979 raise error.Abort(b'manifest revision must be integer or full '
980 raise error.Abort(b'manifest revision must be integer or full '
980 b'node')
981 b'node')
981 def d():
982 def d():
982 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
983 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
983 repo.manifestlog[t].read()
984 repo.manifestlog[t].read()
984 timer(d)
985 timer(d)
985 fm.end()
986 fm.end()
986
987
987 @command(b'perfchangeset', formatteropts)
988 @command(b'perfchangeset', formatteropts)
988 def perfchangeset(ui, repo, rev, **opts):
989 def perfchangeset(ui, repo, rev, **opts):
989 opts = _byteskwargs(opts)
990 opts = _byteskwargs(opts)
990 timer, fm = gettimer(ui, opts)
991 timer, fm = gettimer(ui, opts)
991 n = scmutil.revsingle(repo, rev).node()
992 n = scmutil.revsingle(repo, rev).node()
992 def d():
993 def d():
993 repo.changelog.read(n)
994 repo.changelog.read(n)
994 #repo.changelog._cache = None
995 #repo.changelog._cache = None
995 timer(d)
996 timer(d)
996 fm.end()
997 fm.end()
997
998
998 @command(b'perfignore', formatteropts)
999 @command(b'perfignore', formatteropts)
999 def perfignore(ui, repo, **opts):
1000 def perfignore(ui, repo, **opts):
1000 """benchmark operation related to computing ignore"""
1001 """benchmark operation related to computing ignore"""
1001 opts = _byteskwargs(opts)
1002 opts = _byteskwargs(opts)
1002 timer, fm = gettimer(ui, opts)
1003 timer, fm = gettimer(ui, opts)
1003 dirstate = repo.dirstate
1004 dirstate = repo.dirstate
1004
1005
1005 def setupone():
1006 def setupone():
1006 dirstate.invalidate()
1007 dirstate.invalidate()
1007 clearfilecache(dirstate, b'_ignore')
1008 clearfilecache(dirstate, b'_ignore')
1008
1009
1009 def runone():
1010 def runone():
1010 dirstate._ignore
1011 dirstate._ignore
1011
1012
1012 timer(runone, setup=setupone, title=b"load")
1013 timer(runone, setup=setupone, title=b"load")
1013 fm.end()
1014 fm.end()
1014
1015
1015 @command(b'perfindex', [
1016 @command(b'perfindex', [
1016 (b'', b'rev', b'', b'revision to be looked up (default tip)'),
1017 (b'', b'rev', b'', b'revision to be looked up (default tip)'),
1017 ] + formatteropts)
1018 ] + formatteropts)
1018 def perfindex(ui, repo, **opts):
1019 def perfindex(ui, repo, **opts):
1019 import mercurial.revlog
1020 import mercurial.revlog
1020 opts = _byteskwargs(opts)
1021 opts = _byteskwargs(opts)
1021 timer, fm = gettimer(ui, opts)
1022 timer, fm = gettimer(ui, opts)
1022 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1023 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1023 if opts[b'rev'] is None:
1024 if opts[b'rev'] is None:
1024 n = repo[b"tip"].node()
1025 n = repo[b"tip"].node()
1025 else:
1026 else:
1026 rev = scmutil.revsingle(repo, opts[b'rev'])
1027 rev = scmutil.revsingle(repo, opts[b'rev'])
1027 n = repo[rev].node()
1028 n = repo[rev].node()
1028
1029
1029 unfi = repo.unfiltered()
1030 unfi = repo.unfiltered()
1030 # find the filecache func directly
1031 # find the filecache func directly
1031 # This avoid polluting the benchmark with the filecache logic
1032 # This avoid polluting the benchmark with the filecache logic
1032 makecl = unfi.__class__.changelog.func
1033 makecl = unfi.__class__.changelog.func
1033 def setup():
1034 def setup():
1034 # probably not necessary, but for good measure
1035 # probably not necessary, but for good measure
1035 clearchangelog(unfi)
1036 clearchangelog(unfi)
1036 def d():
1037 def d():
1037 cl = makecl(unfi)
1038 cl = makecl(unfi)
1038 cl.rev(n)
1039 cl.rev(n)
1039 timer(d, setup=setup)
1040 timer(d, setup=setup)
1040 fm.end()
1041 fm.end()
1041
1042
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark spawning `hg version` with an empty configuration"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def runone():
        if os.name == r'nt':
            # no /dev/null on Windows; neuter HGRCPATH via the environment
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
        else:
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
    timer(runone)
    fm.end()
1055
1056
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark reading the parents of the first N changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]
    def runone():
        for node in nodes:
            repo.changelog.parents(node)
    timer(runone)
    fm.end()
1072
1073
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a single changectx"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def runone():
        len(repo[x].files())
    timer(runone)
    fm.end()
1082
1083
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def runone():
        # field 3 of a parsed changelog entry is the list of touched files
        len(cl.read(x)[3])
    timer(runone)
    fm.end()
1093
1094
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier with repo.lookup()"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def runone():
        return len(repo.lookup(rev))
    timer(runone)
    fm.end()
1100
1101
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark replaying a reproducible random stream of linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every run replays the exact same edit stream
    random.seed(0)
    randint = random.randint
    nblines = 0
    replacements = []
    for rev in _xrange(edits):
        a1 = randint(0, nblines)
        a2 = randint(a1, min(nblines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        nblines += (b2 - b1) - (a2 - a1)
        replacements.append((rev, a1, a2, b1, b2))

    def runone():
        ll = linelog.linelog()
        for args in replacements:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(runone)
    fm.end()
1134
1135
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def runone():
        return len(scmutil.revrange(repo, specs))
    timer(runone)
    fm.end()
1142
1143
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in a freshly built changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    # disable the lazy index parser of old Mercurial versions
    mercurial.revlog._prereadsize = 2**24
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def runone():
        cl.rev(node)
        # drop caches so each run performs the lookup from scratch
        clearcaches(cl)
    timer(runone)
    fm.end()
1156
1157
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark the log command, optionally following renames"""
    opts = _byteskwargs(opts)
    rev = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    def runone():
        commands.log(ui, repo, rev=rev, date=b'', user=b'',
                     copies=opts.get(b'rename'))
    timer(runone)
    ui.popbuffer()
    fm.end()
1170
1171
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def walkbackwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch() # read changelog data (in addition to the index)
    timer(walkbackwards)
    fm.end()
1185
1186
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # render into the bit bucket so output cost is not part of the
    # measurement; the handle used to be leaked, close it when done
    nullui.fout = open(os.devnull, r'wb')
    try:
        nullui.disablepager()
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                           b' {author|person}: {desc|firstline}\n')
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)
        def format():
            # render every selected revision through the template engine
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        nullui.fout.close()
1219
1220
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE: the default used to be the mutable literal `revs=[]`; use None
    # as the sentinel instead (both fall through to all() below)
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # merges are the only revisions where copy tracing matters
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1295
1296
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def runone():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(runone)
    fm.end()
1302
1303
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    def runone():
        store.fncache._load()
    timer(runone)
    fm.end()
1312
1313
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # the lock used to leak if anything below raised; always release it
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')
        def d():
            # force a write even though nothing changed
            s.fncache._dirty = True
            s.fncache.write(tr)
        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
1329
1330
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()
    def runone():
        for path in store.fncache.entries:
            store.encode(path)
    timer(runone)
    fm.end()
1341
1342
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop used by threaded perfbdiff runs.

    Pulls text pairs off *q* and diffs them until a None sentinel arrives,
    then parks on the *ready* condition; exits once *done* is set.
    """
    while not done.is_set():
        job = q.get()
        while job is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*job)
            elif blocks:
                mdiff.bdiff.blocks(*job)
            else:
                mdiff.textdiff(*job)
            q.task_done()
            job = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1357
1358
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for manifest node *mnode*."""
    manifestlog = repo.manifestlog

    # recent Mercurial exposes getstorage(); fall back to the old attribute
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
1367
1368
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    pairs = []

    rl = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = rl.rev(rl.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(rl) - 1)):
        if opts[b'alldata']:
            # Load every text touched by the changeset: the manifest first...
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pairs.append((_manifestrevision(repo, pctx.manifestnode()),
                              mtext))

            # ...then each filelog revision named by the manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                old = fctx.revision(change[0][0] or -1)
                new = fctx.revision(change[1][0] or -1)
                pairs.append((old, new))
        else:
            dp = rl.deltaparent(rev)
            pairs.append((rl.revision(dp), rl.revision(rev)))

    withthreads = threads > 0
    if withthreads:
        # Start the worker pool; prime the queue with one None sentinel per
        # worker so they all park on the condition before timing begins.
        q = queue()
        for _i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for _i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def runone():
            for pair in pairs:
                q.put(pair)
            for _i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    else:
        def runone():
            for pair in pairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    timer, fm = gettimer(ui, opts)
    timer(runone)
    fm.end()

    if withthreads:
        # Ask the workers to exit and wake any parked on the condition.
        done.set()
        for _i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1468
1469
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    pairs = []

    rl = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = rl.rev(rl.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(rl) - 1)):
        if opts[b'alldata']:
            # Load every text touched by the changeset: the manifest first...
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pairs.append((_manifestrevision(repo, pctx.manifestnode()),
                              mtext))

            # ...then each filelog revision named by the manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                old = fctx.revision(change[0][0] or -1)
                new = fctx.revision(change[1][0] or -1)
                pairs.append((old, new))
        else:
            dp = rl.deltaparent(rev)
            pairs.append((rl.revision(dp), rl.revision(rev)))

    def runone():
        for left, right in pairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(runone)
    fm.end()
1534
1535
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # benchmark every interesting combination of whitespace flags
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        kwargs = {options[flag]: b'1' for flag in diffopt}
        def runone(kwargs=kwargs):
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()
        key = diffopt.encode('ascii')
        title = b'diffopts: %s' % (key and (b'-' + key) or b'none')
        timer(runone, title=title)
    fm.end()
1556
1557
1557 @command(b'perfrevlogindex', revlogopts + formatteropts,
1558 @command(b'perfrevlogindex', revlogopts + formatteropts,
1558 b'-c|-m|FILE')
1559 b'-c|-m|FILE')
1559 def perfrevlogindex(ui, repo, file_=None, **opts):
1560 def perfrevlogindex(ui, repo, file_=None, **opts):
1560 """Benchmark operations against a revlog index.
1561 """Benchmark operations against a revlog index.
1561
1562
1562 This tests constructing a revlog instance, reading index data,
1563 This tests constructing a revlog instance, reading index data,
1563 parsing index data, and performing various operations related to
1564 parsing index data, and performing various operations related to
1564 index data.
1565 index data.
1565 """
1566 """
1566
1567
1567 opts = _byteskwargs(opts)
1568 opts = _byteskwargs(opts)
1568
1569
1569 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1570 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1570
1571
1571 opener = getattr(rl, 'opener') # trick linter
1572 opener = getattr(rl, 'opener') # trick linter
1572 indexfile = rl.indexfile
1573 indexfile = rl.indexfile
1573 data = opener.read(indexfile)
1574 data = opener.read(indexfile)
1574
1575
1575 header = struct.unpack(b'>I', data[0:4])[0]
1576 header = struct.unpack(b'>I', data[0:4])[0]
1576 version = header & 0xFFFF
1577 version = header & 0xFFFF
1577 if version == 1:
1578 if version == 1:
1578 revlogio = revlog.revlogio()
1579 revlogio = revlog.revlogio()
1579 inline = header & (1 << 16)
1580 inline = header & (1 << 16)
1580 else:
1581 else:
1581 raise error.Abort((b'unsupported revlog version: %d') % version)
1582 raise error.Abort((b'unsupported revlog version: %d') % version)
1582
1583
1583 rllen = len(rl)
1584 rllen = len(rl)
1584
1585
1585 node0 = rl.node(0)
1586 node0 = rl.node(0)
1586 node25 = rl.node(rllen // 4)
1587 node25 = rl.node(rllen // 4)
1587 node50 = rl.node(rllen // 2)
1588 node50 = rl.node(rllen // 2)
1588 node75 = rl.node(rllen // 4 * 3)
1589 node75 = rl.node(rllen // 4 * 3)
1589 node100 = rl.node(rllen - 1)
1590 node100 = rl.node(rllen - 1)
1590
1591
1591 allrevs = range(rllen)
1592 allrevs = range(rllen)
1592 allrevsrev = list(reversed(allrevs))
1593 allrevsrev = list(reversed(allrevs))
1593 allnodes = [rl.node(rev) for rev in range(rllen)]
1594 allnodes = [rl.node(rev) for rev in range(rllen)]
1594 allnodesrev = list(reversed(allnodes))
1595 allnodesrev = list(reversed(allnodes))
1595
1596
1596 def constructor():
1597 def constructor():
1597 revlog.revlog(opener, indexfile)
1598 revlog.revlog(opener, indexfile)
1598
1599
1599 def read():
1600 def read():
1600 with opener(indexfile) as fh:
1601 with opener(indexfile) as fh:
1601 fh.read()
1602 fh.read()
1602
1603
1603 def parseindex():
1604 def parseindex():
1604 revlogio.parseindex(data, inline)
1605 revlogio.parseindex(data, inline)
1605
1606
1606 def getentry(revornode):
1607 def getentry(revornode):
1607 index = revlogio.parseindex(data, inline)[0]
1608 index = revlogio.parseindex(data, inline)[0]
1608 index[revornode]
1609 index[revornode]
1609
1610
1610 def getentries(revs, count=1):
1611 def getentries(revs, count=1):
1611 index = revlogio.parseindex(data, inline)[0]
1612 index = revlogio.parseindex(data, inline)[0]
1612
1613
1613 for i in range(count):
1614 for i in range(count):
1614 for rev in revs:
1615 for rev in revs:
1615 index[rev]
1616 index[rev]
1616
1617
1617 def resolvenode(node):
1618 def resolvenode(node):
1618 nodemap = revlogio.parseindex(data, inline)[1]
1619 nodemap = revlogio.parseindex(data, inline)[1]
1619 # This only works for the C code.
1620 # This only works for the C code.
1620 if nodemap is None:
1621 if nodemap is None:
1621 return
1622 return
1622
1623
1623 try:
1624 try:
1624 nodemap[node]
1625 nodemap[node]
1625 except error.RevlogError:
1626 except error.RevlogError:
1626 pass
1627 pass
1627
1628
1628 def resolvenodes(nodes, count=1):
1629 def resolvenodes(nodes, count=1):
1629 nodemap = revlogio.parseindex(data, inline)[1]
1630 nodemap = revlogio.parseindex(data, inline)[1]
1630 if nodemap is None:
1631 if nodemap is None:
1631 return
1632 return
1632
1633
1633 for i in range(count):
1634 for i in range(count):
1634 for node in nodes:
1635 for node in nodes:
1635 try:
1636 try:
1636 nodemap[node]
1637 nodemap[node]
1637 except error.RevlogError:
1638 except error.RevlogError:
1638 pass
1639 pass
1639
1640
1640 benches = [
1641 benches = [
1641 (constructor, b'revlog constructor'),
1642 (constructor, b'revlog constructor'),
1642 (read, b'read'),
1643 (read, b'read'),
1643 (parseindex, b'create index object'),
1644 (parseindex, b'create index object'),
1644 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1645 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1645 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1646 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1646 (lambda: resolvenode(node0), b'look up node at rev 0'),
1647 (lambda: resolvenode(node0), b'look up node at rev 0'),
1647 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1648 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1648 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1649 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1649 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1650 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1650 (lambda: resolvenode(node100), b'look up node at tip'),
1651 (lambda: resolvenode(node100), b'look up node at tip'),
1651 # 2x variation is to measure caching impact.
1652 # 2x variation is to measure caching impact.
1652 (lambda: resolvenodes(allnodes),
1653 (lambda: resolvenodes(allnodes),
1653 b'look up all nodes (forward)'),
1654 b'look up all nodes (forward)'),
1654 (lambda: resolvenodes(allnodes, 2),
1655 (lambda: resolvenodes(allnodes, 2),
1655 b'look up all nodes 2x (forward)'),
1656 b'look up all nodes 2x (forward)'),
1656 (lambda: resolvenodes(allnodesrev),
1657 (lambda: resolvenodes(allnodesrev),
1657 b'look up all nodes (reverse)'),
1658 b'look up all nodes (reverse)'),
1658 (lambda: resolvenodes(allnodesrev, 2),
1659 (lambda: resolvenodes(allnodesrev, 2),
1659 b'look up all nodes 2x (reverse)'),
1660 b'look up all nodes 2x (reverse)'),
1660 (lambda: getentries(allrevs),
1661 (lambda: getentries(allrevs),
1661 b'retrieve all index entries (forward)'),
1662 b'retrieve all index entries (forward)'),
1662 (lambda: getentries(allrevs, 2),
1663 (lambda: getentries(allrevs, 2),
1663 b'retrieve all index entries 2x (forward)'),
1664 b'retrieve all index entries 2x (forward)'),
1664 (lambda: getentries(allrevsrev),
1665 (lambda: getentries(allrevsrev),
1665 b'retrieve all index entries (reverse)'),
1666 b'retrieve all index entries (reverse)'),
1666 (lambda: getentries(allrevsrev, 2),
1667 (lambda: getentries(allrevsrev, 2),
1667 b'retrieve all index entries 2x (reverse)'),
1668 b'retrieve all index entries 2x (reverse)'),
1668 ]
1669 ]
1669
1670
1670 for fn, title in benches:
1671 for fn, title in benches:
1671 timer, fm = gettimer(ui, opts)
1672 timer, fm = gettimer(ui, opts)
1672 timer(fn, title=title)
1673 timer(fn, title=title)
1673 fm.end()
1674 fm.end()
1674
1675
1675 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1676 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1676 [(b'd', b'dist', 100, b'distance between the revisions'),
1677 [(b'd', b'dist', 100, b'distance between the revisions'),
1677 (b's', b'startrev', 0, b'revision to start reading at'),
1678 (b's', b'startrev', 0, b'revision to start reading at'),
1678 (b'', b'reverse', False, b'read in reverse')],
1679 (b'', b'reverse', False, b'read in reverse')],
1679 b'-c|-m|FILE')
1680 b'-c|-m|FILE')
1680 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1681 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1681 **opts):
1682 **opts):
1682 """Benchmark reading a series of revisions from a revlog.
1683 """Benchmark reading a series of revisions from a revlog.
1683
1684
1684 By default, we read every ``-d/--dist`` revision from 0 to tip of
1685 By default, we read every ``-d/--dist`` revision from 0 to tip of
1685 the specified revlog.
1686 the specified revlog.
1686
1687
1687 The start revision can be defined via ``-s/--startrev``.
1688 The start revision can be defined via ``-s/--startrev``.
1688 """
1689 """
1689 opts = _byteskwargs(opts)
1690 opts = _byteskwargs(opts)
1690
1691
1691 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1692 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1692 rllen = getlen(ui)(rl)
1693 rllen = getlen(ui)(rl)
1693
1694
1694 if startrev < 0:
1695 if startrev < 0:
1695 startrev = rllen + startrev
1696 startrev = rllen + startrev
1696
1697
1697 def d():
1698 def d():
1698 rl.clearcaches()
1699 rl.clearcaches()
1699
1700
1700 beginrev = startrev
1701 beginrev = startrev
1701 endrev = rllen
1702 endrev = rllen
1702 dist = opts[b'dist']
1703 dist = opts[b'dist']
1703
1704
1704 if reverse:
1705 if reverse:
1705 beginrev, endrev = endrev - 1, beginrev - 1
1706 beginrev, endrev = endrev - 1, beginrev - 1
1706 dist = -1 * dist
1707 dist = -1 * dist
1707
1708
1708 for x in _xrange(beginrev, endrev, dist):
1709 for x in _xrange(beginrev, endrev, dist):
1709 # Old revisions don't support passing int.
1710 # Old revisions don't support passing int.
1710 n = rl.node(x)
1711 n = rl.node(x)
1711 rl.revision(n)
1712 rl.revision(n)
1712
1713
1713 timer, fm = gettimer(ui, opts)
1714 timer, fm = gettimer(ui, opts)
1714 timer(d)
1715 timer(d)
1715 fm.end()
1716 fm.end()
1716
1717
1717 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1718 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1718 [(b's', b'startrev', 1000, b'revision to start writing at'),
1719 [(b's', b'startrev', 1000, b'revision to start writing at'),
1719 (b'', b'stoprev', -1, b'last revision to write'),
1720 (b'', b'stoprev', -1, b'last revision to write'),
1720 (b'', b'count', 3, b'last revision to write'),
1721 (b'', b'count', 3, b'last revision to write'),
1721 (b'', b'details', False, b'print timing for every revisions tested'),
1722 (b'', b'details', False, b'print timing for every revisions tested'),
1722 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1723 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1723 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1724 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1724 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1725 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1725 ],
1726 ],
1726 b'-c|-m|FILE')
1727 b'-c|-m|FILE')
1727 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1728 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1728 """Benchmark writing a series of revisions to a revlog.
1729 """Benchmark writing a series of revisions to a revlog.
1729
1730
1730 Possible source values are:
1731 Possible source values are:
1731 * `full`: add from a full text (default).
1732 * `full`: add from a full text (default).
1732 * `parent-1`: add from a delta to the first parent
1733 * `parent-1`: add from a delta to the first parent
1733 * `parent-2`: add from a delta to the second parent if it exists
1734 * `parent-2`: add from a delta to the second parent if it exists
1734 (use a delta from the first parent otherwise)
1735 (use a delta from the first parent otherwise)
1735 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1736 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1736 * `storage`: add from the existing precomputed deltas
1737 * `storage`: add from the existing precomputed deltas
1737 """
1738 """
1738 opts = _byteskwargs(opts)
1739 opts = _byteskwargs(opts)
1739
1740
1740 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1741 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1741 rllen = getlen(ui)(rl)
1742 rllen = getlen(ui)(rl)
1742 if startrev < 0:
1743 if startrev < 0:
1743 startrev = rllen + startrev
1744 startrev = rllen + startrev
1744 if stoprev < 0:
1745 if stoprev < 0:
1745 stoprev = rllen + stoprev
1746 stoprev = rllen + stoprev
1746
1747
1747 lazydeltabase = opts['lazydeltabase']
1748 lazydeltabase = opts['lazydeltabase']
1748 source = opts['source']
1749 source = opts['source']
1749 clearcaches = opts['clear_caches']
1750 clearcaches = opts['clear_caches']
1750 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1751 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1751 b'storage')
1752 b'storage')
1752 if source not in validsource:
1753 if source not in validsource:
1753 raise error.Abort('invalid source type: %s' % source)
1754 raise error.Abort('invalid source type: %s' % source)
1754
1755
1755 ### actually gather results
1756 ### actually gather results
1756 count = opts['count']
1757 count = opts['count']
1757 if count <= 0:
1758 if count <= 0:
1758 raise error.Abort('invalide run count: %d' % count)
1759 raise error.Abort('invalide run count: %d' % count)
1759 allresults = []
1760 allresults = []
1760 for c in range(count):
1761 for c in range(count):
1761 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1762 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1762 lazydeltabase=lazydeltabase,
1763 lazydeltabase=lazydeltabase,
1763 clearcaches=clearcaches)
1764 clearcaches=clearcaches)
1764 allresults.append(timing)
1765 allresults.append(timing)
1765
1766
1766 ### consolidate the results in a single list
1767 ### consolidate the results in a single list
1767 results = []
1768 results = []
1768 for idx, (rev, t) in enumerate(allresults[0]):
1769 for idx, (rev, t) in enumerate(allresults[0]):
1769 ts = [t]
1770 ts = [t]
1770 for other in allresults[1:]:
1771 for other in allresults[1:]:
1771 orev, ot = other[idx]
1772 orev, ot = other[idx]
1772 assert orev == rev
1773 assert orev == rev
1773 ts.append(ot)
1774 ts.append(ot)
1774 results.append((rev, ts))
1775 results.append((rev, ts))
1775 resultcount = len(results)
1776 resultcount = len(results)
1776
1777
1777 ### Compute and display relevant statistics
1778 ### Compute and display relevant statistics
1778
1779
1779 # get a formatter
1780 # get a formatter
1780 fm = ui.formatter(b'perf', opts)
1781 fm = ui.formatter(b'perf', opts)
1781 displayall = ui.configbool(b"perf", b"all-timing", False)
1782 displayall = ui.configbool(b"perf", b"all-timing", False)
1782
1783
1783 # print individual details if requested
1784 # print individual details if requested
1784 if opts['details']:
1785 if opts['details']:
1785 for idx, item in enumerate(results, 1):
1786 for idx, item in enumerate(results, 1):
1786 rev, data = item
1787 rev, data = item
1787 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1788 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1788 formatone(fm, data, title=title, displayall=displayall)
1789 formatone(fm, data, title=title, displayall=displayall)
1789
1790
1790 # sorts results by median time
1791 # sorts results by median time
1791 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1792 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1792 # list of (name, index) to display)
1793 # list of (name, index) to display)
1793 relevants = [
1794 relevants = [
1794 ("min", 0),
1795 ("min", 0),
1795 ("10%", resultcount * 10 // 100),
1796 ("10%", resultcount * 10 // 100),
1796 ("25%", resultcount * 25 // 100),
1797 ("25%", resultcount * 25 // 100),
1797 ("50%", resultcount * 70 // 100),
1798 ("50%", resultcount * 70 // 100),
1798 ("75%", resultcount * 75 // 100),
1799 ("75%", resultcount * 75 // 100),
1799 ("90%", resultcount * 90 // 100),
1800 ("90%", resultcount * 90 // 100),
1800 ("95%", resultcount * 95 // 100),
1801 ("95%", resultcount * 95 // 100),
1801 ("99%", resultcount * 99 // 100),
1802 ("99%", resultcount * 99 // 100),
1802 ("99.9%", resultcount * 999 // 1000),
1803 ("99.9%", resultcount * 999 // 1000),
1803 ("99.99%", resultcount * 9999 // 10000),
1804 ("99.99%", resultcount * 9999 // 10000),
1804 ("99.999%", resultcount * 99999 // 100000),
1805 ("99.999%", resultcount * 99999 // 100000),
1805 ("max", -1),
1806 ("max", -1),
1806 ]
1807 ]
1807 if not ui.quiet:
1808 if not ui.quiet:
1808 for name, idx in relevants:
1809 for name, idx in relevants:
1809 data = results[idx]
1810 data = results[idx]
1810 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1811 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1811 formatone(fm, data[1], title=title, displayall=displayall)
1812 formatone(fm, data[1], title=title, displayall=displayall)
1812
1813
1813 # XXX summing that many float will not be very precise, we ignore this fact
1814 # XXX summing that many float will not be very precise, we ignore this fact
1814 # for now
1815 # for now
1815 totaltime = []
1816 totaltime = []
1816 for item in allresults:
1817 for item in allresults:
1817 totaltime.append((sum(x[1][0] for x in item),
1818 totaltime.append((sum(x[1][0] for x in item),
1818 sum(x[1][1] for x in item),
1819 sum(x[1][1] for x in item),
1819 sum(x[1][2] for x in item),)
1820 sum(x[1][2] for x in item),)
1820 )
1821 )
1821 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1822 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1822 displayall=displayall)
1823 displayall=displayall)
1823 fm.end()
1824 fm.end()
1824
1825
1825 class _faketr(object):
1826 class _faketr(object):
1826 def add(s, x, y, z=None):
1827 def add(s, x, y, z=None):
1827 return None
1828 return None
1828
1829
1829 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1830 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1830 lazydeltabase=True, clearcaches=True):
1831 lazydeltabase=True, clearcaches=True):
1831 timings = []
1832 timings = []
1832 tr = _faketr()
1833 tr = _faketr()
1833 with _temprevlog(ui, orig, startrev) as dest:
1834 with _temprevlog(ui, orig, startrev) as dest:
1834 dest._lazydeltabase = lazydeltabase
1835 dest._lazydeltabase = lazydeltabase
1835 revs = list(orig.revs(startrev, stoprev))
1836 revs = list(orig.revs(startrev, stoprev))
1836 total = len(revs)
1837 total = len(revs)
1837 topic = 'adding'
1838 topic = 'adding'
1838 if runidx is not None:
1839 if runidx is not None:
1839 topic += ' (run #%d)' % runidx
1840 topic += ' (run #%d)' % runidx
1840 # Support both old and new progress API
1841 # Support both old and new progress API
1841 if util.safehasattr(ui, 'makeprogress'):
1842 if util.safehasattr(ui, 'makeprogress'):
1842 progress = ui.makeprogress(topic, unit='revs', total=total)
1843 progress = ui.makeprogress(topic, unit='revs', total=total)
1843 def updateprogress(pos):
1844 def updateprogress(pos):
1844 progress.update(pos)
1845 progress.update(pos)
1845 def completeprogress():
1846 def completeprogress():
1846 progress.complete()
1847 progress.complete()
1847 else:
1848 else:
1848 def updateprogress(pos):
1849 def updateprogress(pos):
1849 ui.progress(topic, pos, unit='revs', total=total)
1850 ui.progress(topic, pos, unit='revs', total=total)
1850 def completeprogress():
1851 def completeprogress():
1851 ui.progress(topic, None, unit='revs', total=total)
1852 ui.progress(topic, None, unit='revs', total=total)
1852
1853
1853 for idx, rev in enumerate(revs):
1854 for idx, rev in enumerate(revs):
1854 updateprogress(idx)
1855 updateprogress(idx)
1855 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1856 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1856 if clearcaches:
1857 if clearcaches:
1857 dest.index.clearcaches()
1858 dest.index.clearcaches()
1858 dest.clearcaches()
1859 dest.clearcaches()
1859 with timeone() as r:
1860 with timeone() as r:
1860 dest.addrawrevision(*addargs, **addkwargs)
1861 dest.addrawrevision(*addargs, **addkwargs)
1861 timings.append((rev, r[0]))
1862 timings.append((rev, r[0]))
1862 updateprogress(total)
1863 updateprogress(total)
1863 completeprogress()
1864 completeprogress()
1864 return timings
1865 return timings
1865
1866
1866 def _getrevisionseed(orig, rev, tr, source):
1867 def _getrevisionseed(orig, rev, tr, source):
1867 from mercurial.node import nullid
1868 from mercurial.node import nullid
1868
1869
1869 linkrev = orig.linkrev(rev)
1870 linkrev = orig.linkrev(rev)
1870 node = orig.node(rev)
1871 node = orig.node(rev)
1871 p1, p2 = orig.parents(node)
1872 p1, p2 = orig.parents(node)
1872 flags = orig.flags(rev)
1873 flags = orig.flags(rev)
1873 cachedelta = None
1874 cachedelta = None
1874 text = None
1875 text = None
1875
1876
1876 if source == b'full':
1877 if source == b'full':
1877 text = orig.revision(rev)
1878 text = orig.revision(rev)
1878 elif source == b'parent-1':
1879 elif source == b'parent-1':
1879 baserev = orig.rev(p1)
1880 baserev = orig.rev(p1)
1880 cachedelta = (baserev, orig.revdiff(p1, rev))
1881 cachedelta = (baserev, orig.revdiff(p1, rev))
1881 elif source == b'parent-2':
1882 elif source == b'parent-2':
1882 parent = p2
1883 parent = p2
1883 if p2 == nullid:
1884 if p2 == nullid:
1884 parent = p1
1885 parent = p1
1885 baserev = orig.rev(parent)
1886 baserev = orig.rev(parent)
1886 cachedelta = (baserev, orig.revdiff(parent, rev))
1887 cachedelta = (baserev, orig.revdiff(parent, rev))
1887 elif source == b'parent-smallest':
1888 elif source == b'parent-smallest':
1888 p1diff = orig.revdiff(p1, rev)
1889 p1diff = orig.revdiff(p1, rev)
1889 parent = p1
1890 parent = p1
1890 diff = p1diff
1891 diff = p1diff
1891 if p2 != nullid:
1892 if p2 != nullid:
1892 p2diff = orig.revdiff(p2, rev)
1893 p2diff = orig.revdiff(p2, rev)
1893 if len(p1diff) > len(p2diff):
1894 if len(p1diff) > len(p2diff):
1894 parent = p2
1895 parent = p2
1895 diff = p2diff
1896 diff = p2diff
1896 baserev = orig.rev(parent)
1897 baserev = orig.rev(parent)
1897 cachedelta = (baserev, diff)
1898 cachedelta = (baserev, diff)
1898 elif source == b'storage':
1899 elif source == b'storage':
1899 baserev = orig.deltaparent(rev)
1900 baserev = orig.deltaparent(rev)
1900 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1901 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1901
1902
1902 return ((text, tr, linkrev, p1, p2),
1903 return ((text, tr, linkrev, p1, p2),
1903 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1904 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1904
1905
1905 @contextlib.contextmanager
1906 @contextlib.contextmanager
1906 def _temprevlog(ui, orig, truncaterev):
1907 def _temprevlog(ui, orig, truncaterev):
1907 from mercurial import vfs as vfsmod
1908 from mercurial import vfs as vfsmod
1908
1909
1909 if orig._inline:
1910 if orig._inline:
1910 raise error.Abort('not supporting inline revlog (yet)')
1911 raise error.Abort('not supporting inline revlog (yet)')
1911
1912
1912 origindexpath = orig.opener.join(orig.indexfile)
1913 origindexpath = orig.opener.join(orig.indexfile)
1913 origdatapath = orig.opener.join(orig.datafile)
1914 origdatapath = orig.opener.join(orig.datafile)
1914 indexname = 'revlog.i'
1915 indexname = 'revlog.i'
1915 dataname = 'revlog.d'
1916 dataname = 'revlog.d'
1916
1917
1917 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1918 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1918 try:
1919 try:
1919 # copy the data file in a temporary directory
1920 # copy the data file in a temporary directory
1920 ui.debug('copying data in %s\n' % tmpdir)
1921 ui.debug('copying data in %s\n' % tmpdir)
1921 destindexpath = os.path.join(tmpdir, 'revlog.i')
1922 destindexpath = os.path.join(tmpdir, 'revlog.i')
1922 destdatapath = os.path.join(tmpdir, 'revlog.d')
1923 destdatapath = os.path.join(tmpdir, 'revlog.d')
1923 shutil.copyfile(origindexpath, destindexpath)
1924 shutil.copyfile(origindexpath, destindexpath)
1924 shutil.copyfile(origdatapath, destdatapath)
1925 shutil.copyfile(origdatapath, destdatapath)
1925
1926
1926 # remove the data we want to add again
1927 # remove the data we want to add again
1927 ui.debug('truncating data to be rewritten\n')
1928 ui.debug('truncating data to be rewritten\n')
1928 with open(destindexpath, 'ab') as index:
1929 with open(destindexpath, 'ab') as index:
1929 index.seek(0)
1930 index.seek(0)
1930 index.truncate(truncaterev * orig._io.size)
1931 index.truncate(truncaterev * orig._io.size)
1931 with open(destdatapath, 'ab') as data:
1932 with open(destdatapath, 'ab') as data:
1932 data.seek(0)
1933 data.seek(0)
1933 data.truncate(orig.start(truncaterev))
1934 data.truncate(orig.start(truncaterev))
1934
1935
1935 # instantiate a new revlog from the temporary copy
1936 # instantiate a new revlog from the temporary copy
1936 ui.debug('truncating adding to be rewritten\n')
1937 ui.debug('truncating adding to be rewritten\n')
1937 vfs = vfsmod.vfs(tmpdir)
1938 vfs = vfsmod.vfs(tmpdir)
1938 vfs.options = getattr(orig.opener, 'options', None)
1939 vfs.options = getattr(orig.opener, 'options', None)
1939
1940
1940 dest = revlog.revlog(vfs,
1941 dest = revlog.revlog(vfs,
1941 indexfile=indexname,
1942 indexfile=indexname,
1942 datafile=dataname)
1943 datafile=dataname)
1943 if dest._inline:
1944 if dest._inline:
1944 raise error.Abort('not supporting inline revlog (yet)')
1945 raise error.Abort('not supporting inline revlog (yet)')
1945 # make sure internals are initialized
1946 # make sure internals are initialized
1946 dest.revision(len(dest) - 1)
1947 dest.revision(len(dest) - 1)
1947 yield dest
1948 yield dest
1948 del dest, vfs
1949 del dest, vfs
1949 finally:
1950 finally:
1950 shutil.rmtree(tmpdir, True)
1951 shutil.rmtree(tmpdir, True)
1951
1952
1952 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1953 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1953 [(b'e', b'engines', b'', b'compression engines to use'),
1954 [(b'e', b'engines', b'', b'compression engines to use'),
1954 (b's', b'startrev', 0, b'revision to start at')],
1955 (b's', b'startrev', 0, b'revision to start at')],
1955 b'-c|-m|FILE')
1956 b'-c|-m|FILE')
1956 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1957 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1957 """Benchmark operations on revlog chunks.
1958 """Benchmark operations on revlog chunks.
1958
1959
1959 Logically, each revlog is a collection of fulltext revisions. However,
1960 Logically, each revlog is a collection of fulltext revisions. However,
1960 stored within each revlog are "chunks" of possibly compressed data. This
1961 stored within each revlog are "chunks" of possibly compressed data. This
1961 data needs to be read and decompressed or compressed and written.
1962 data needs to be read and decompressed or compressed and written.
1962
1963
1963 This command measures the time it takes to read+decompress and recompress
1964 This command measures the time it takes to read+decompress and recompress
1964 chunks in a revlog. It effectively isolates I/O and compression performance.
1965 chunks in a revlog. It effectively isolates I/O and compression performance.
1965 For measurements of higher-level operations like resolving revisions,
1966 For measurements of higher-level operations like resolving revisions,
1966 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1967 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1967 """
1968 """
1968 opts = _byteskwargs(opts)
1969 opts = _byteskwargs(opts)
1969
1970
1970 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1971 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1971
1972
1972 # _chunkraw was renamed to _getsegmentforrevs.
1973 # _chunkraw was renamed to _getsegmentforrevs.
1973 try:
1974 try:
1974 segmentforrevs = rl._getsegmentforrevs
1975 segmentforrevs = rl._getsegmentforrevs
1975 except AttributeError:
1976 except AttributeError:
1976 segmentforrevs = rl._chunkraw
1977 segmentforrevs = rl._chunkraw
1977
1978
1978 # Verify engines argument.
1979 # Verify engines argument.
1979 if engines:
1980 if engines:
1980 engines = set(e.strip() for e in engines.split(b','))
1981 engines = set(e.strip() for e in engines.split(b','))
1981 for engine in engines:
1982 for engine in engines:
1982 try:
1983 try:
1983 util.compressionengines[engine]
1984 util.compressionengines[engine]
1984 except KeyError:
1985 except KeyError:
1985 raise error.Abort(b'unknown compression engine: %s' % engine)
1986 raise error.Abort(b'unknown compression engine: %s' % engine)
1986 else:
1987 else:
1987 engines = []
1988 engines = []
1988 for e in util.compengines:
1989 for e in util.compengines:
1989 engine = util.compengines[e]
1990 engine = util.compengines[e]
1990 try:
1991 try:
1991 if engine.available():
1992 if engine.available():
1992 engine.revlogcompressor().compress(b'dummy')
1993 engine.revlogcompressor().compress(b'dummy')
1993 engines.append(e)
1994 engines.append(e)
1994 except NotImplementedError:
1995 except NotImplementedError:
1995 pass
1996 pass
1996
1997
1997 revs = list(rl.revs(startrev, len(rl) - 1))
1998 revs = list(rl.revs(startrev, len(rl) - 1))
1998
1999
1999 def rlfh(rl):
2000 def rlfh(rl):
2000 if rl._inline:
2001 if rl._inline:
2001 return getsvfs(repo)(rl.indexfile)
2002 return getsvfs(repo)(rl.indexfile)
2002 else:
2003 else:
2003 return getsvfs(repo)(rl.datafile)
2004 return getsvfs(repo)(rl.datafile)
2004
2005
2005 def doread():
2006 def doread():
2006 rl.clearcaches()
2007 rl.clearcaches()
2007 for rev in revs:
2008 for rev in revs:
2008 segmentforrevs(rev, rev)
2009 segmentforrevs(rev, rev)
2009
2010
2010 def doreadcachedfh():
2011 def doreadcachedfh():
2011 rl.clearcaches()
2012 rl.clearcaches()
2012 fh = rlfh(rl)
2013 fh = rlfh(rl)
2013 for rev in revs:
2014 for rev in revs:
2014 segmentforrevs(rev, rev, df=fh)
2015 segmentforrevs(rev, rev, df=fh)
2015
2016
2016 def doreadbatch():
2017 def doreadbatch():
2017 rl.clearcaches()
2018 rl.clearcaches()
2018 segmentforrevs(revs[0], revs[-1])
2019 segmentforrevs(revs[0], revs[-1])
2019
2020
2020 def doreadbatchcachedfh():
2021 def doreadbatchcachedfh():
2021 rl.clearcaches()
2022 rl.clearcaches()
2022 fh = rlfh(rl)
2023 fh = rlfh(rl)
2023 segmentforrevs(revs[0], revs[-1], df=fh)
2024 segmentforrevs(revs[0], revs[-1], df=fh)
2024
2025
2025 def dochunk():
2026 def dochunk():
2026 rl.clearcaches()
2027 rl.clearcaches()
2027 fh = rlfh(rl)
2028 fh = rlfh(rl)
2028 for rev in revs:
2029 for rev in revs:
2029 rl._chunk(rev, df=fh)
2030 rl._chunk(rev, df=fh)
2030
2031
2031 chunks = [None]
2032 chunks = [None]
2032
2033
2033 def dochunkbatch():
2034 def dochunkbatch():
2034 rl.clearcaches()
2035 rl.clearcaches()
2035 fh = rlfh(rl)
2036 fh = rlfh(rl)
2036 # Save chunks as a side-effect.
2037 # Save chunks as a side-effect.
2037 chunks[0] = rl._chunks(revs, df=fh)
2038 chunks[0] = rl._chunks(revs, df=fh)
2038
2039
2039 def docompress(compressor):
2040 def docompress(compressor):
2040 rl.clearcaches()
2041 rl.clearcaches()
2041
2042
2042 try:
2043 try:
2043 # Swap in the requested compression engine.
2044 # Swap in the requested compression engine.
2044 oldcompressor = rl._compressor
2045 oldcompressor = rl._compressor
2045 rl._compressor = compressor
2046 rl._compressor = compressor
2046 for chunk in chunks[0]:
2047 for chunk in chunks[0]:
2047 rl.compress(chunk)
2048 rl.compress(chunk)
2048 finally:
2049 finally:
2049 rl._compressor = oldcompressor
2050 rl._compressor = oldcompressor
2050
2051
2051 benches = [
2052 benches = [
2052 (lambda: doread(), b'read'),
2053 (lambda: doread(), b'read'),
2053 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2054 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2054 (lambda: doreadbatch(), b'read batch'),
2055 (lambda: doreadbatch(), b'read batch'),
2055 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2056 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2056 (lambda: dochunk(), b'chunk'),
2057 (lambda: dochunk(), b'chunk'),
2057 (lambda: dochunkbatch(), b'chunk batch'),
2058 (lambda: dochunkbatch(), b'chunk batch'),
2058 ]
2059 ]
2059
2060
2060 for engine in sorted(engines):
2061 for engine in sorted(engines):
2061 compressor = util.compengines[engine].revlogcompressor()
2062 compressor = util.compengines[engine].revlogcompressor()
2062 benches.append((functools.partial(docompress, compressor),
2063 benches.append((functools.partial(docompress, compressor),
2063 b'compress w/ %s' % engine))
2064 b'compress w/ %s' % engine))
2064
2065
2065 for fn, title in benches:
2066 for fn, title in benches:
2066 timer, fm = gettimer(ui, opts)
2067 timer, fm = gettimer(ui, opts)
2067 timer(fn, title=title)
2068 timer(fn, title=title)
2068 fm.end()
2069 fm.end()
2069
2070
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With --changelog/--manifest there is no FILE argument, so the first
    # positional argument is actually the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice each pre-read data segment back into the per-revision
        # (still compressed) chunks, mirroring what revlog does internally.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): the `chain` argument is unused; the closure iterates
        # `slicedchain` from the enclosing scope instead.
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # consume the generator to time slicing only
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved to mercurial.revlogutils.deltas; fall back for
    # older Mercurial versions ("historical portability" policy).
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the inputs of each phase once, so the timed closures
    # measure only their own phase.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2205
2206
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        # With --contexts, pay the cost of building a changectx per
        # revision; otherwise only iterate the raw revision numbers.
        if contexts:
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()
2228
2229
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # work on the unfiltered repo so filtering itself is part of what we time
    repo = repo.unfiltered()

    def getobs(name):
        # build a benchmark closure for one obsolescence set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a benchmark closure for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
2270
2271
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measures a full rebuild
                view._branchcaches.clear()
            else:
                # drop only this filter's cache: measures the update from
                # the nearest cached subset
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so the
        # resulting order goes from smaller subsets to bigger ones
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write for the duration of the timing,
    # restoring the originals in the finally block below
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2339
2340
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the timed update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # synthetic repoview filters exposing exactly the base/target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the synthetic filters; unconditionally removed in the
        # finally block so they never leak out of this command
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # work on a fresh copy each run so the timed update always
            # starts from the same <base> branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2443
2444
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only print the on-disk branchmap cache files and their
    sizes.  Otherwise, time ``branchmap.read`` against the repoview named
    by --filter (default: the unfiltered repo), falling back through the
    subset table until a cached branchmap is found.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the cached branchmap files, one per filter
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while branchmap.read(repo) is None:
        # fall back to the nearest subset that does have a cached branchmap
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2488
2489
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    storevfs = getsvfs(repo)
    def d():
        # instantiating obsstore parses the on-disk markers; len() forces it
        return len(obsolete.obsstore(storevfs))
    timer(d)
    fm.end()
2498
2499
2499 @command(b'perflrucachedict', formatteropts +
2500 @command(b'perflrucachedict', formatteropts +
2500 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2501 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2501 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2502 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2502 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2503 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2503 (b'', b'size', 4, b'size of cache'),
2504 (b'', b'size', 4, b'size of cache'),
2504 (b'', b'gets', 10000, b'number of key lookups'),
2505 (b'', b'gets', 10000, b'number of key lookups'),
2505 (b'', b'sets', 10000, b'number of key sets'),
2506 (b'', b'sets', 10000, b'number of key sets'),
2506 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2507 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2507 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2508 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2508 norepo=True)
2509 norepo=True)
2509 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    """Benchmark ``util.lrucachedict``: creation, gets, inserts, mixed use.

    When ``costlimit`` is non-zero the cost-aware variants of the
    benchmarks are run instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # Raw construction cost of the cache object itself.
        for _ in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # Random values used both as keys and payloads when populating the cache.
    values = [random.randint(0, _maxint) for _ in _xrange(size)]

    # Get mode fills the cache and then measures raw lookup performance;
    # no eviction occurs because every key is already resident.
    getseq = [random.choice(values) for _ in _xrange(gets)]

    def dogets():
        cache = util.lrucachedict(size)
        for v in values:
            cache[v] = v
        for key in getseq:
            value = cache[key]
            value  # silence pyflakes warning

    def dogetscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            cache.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = cache[key]
                value  # silence pyflakes warning
            except KeyError:
                # Cost-based eviction may have dropped the key.
                pass

    # Set mode measures insertion speed while eviction is taking place.
    # Keys and costs are drawn in lockstep so the RNG call sequence matches
    # one randint + one choice per iteration.
    setseq = []
    costs = []
    for _ in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache.insert(v, v)

    def doinsertscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            cache.insert(v, v, cost=costs[i])

    def dosets():
        cache = util.lrucachedict(size)
        for v in setseq:
            cache[v] = v

    # Mixed mode randomly interleaves gets and sets, with eviction.
    mixedops = []
    for _ in _xrange(mixed):
        # op 0 is a get, op 1 is a set; frequency controlled by the caller.
        op = 0 if random.randint(0, 100) < mixedgetfreq else 1
        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        cache = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache[v] = v

    def domixedcost():
        cache = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    cache[v]
                except KeyError:
                    pass
            else:
                cache.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    # Each benchmark gets its own timer/formatter pair.
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2629
2630
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def dowrites():
        # Call ui.write() exactly as callers do (no hoisting of the bound
        # method) so the measurement reflects real-world usage.
        for _ in range(100000):
            ui.write(b'Testing write performance\n')

    timer(dowrites)
    fm.end()
2642
2643
def uisetup(ui):
    """Extension setup hook.

    On historical Mercurial versions (1.9 - 3.7) that still expose
    ``cmdutil.openrevlog`` but predate ``commands.debugrevlogopts``, wrap
    ``openrevlog`` to turn silent misbehavior of the unsupported ``--dir``
    option into an explicit abort.

    Note: attribute names passed to ``safehasattr``/``wrapfunction`` must be
    native ``str`` -- on Python 3, ``getattr``/``hasattr`` raise ``TypeError``
    for ``bytes`` attribute names.
    """
    if (util.safehasattr(cmdutil, 'openrevlog') and
        not util.safehasattr(commands, 'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # opts keys are bytes; only the attribute-name arguments below
            # use native strings.
            if opts.get(b'dir') and not util.safehasattr(repo, 'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)
2657
2658
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # Drive one full progress bar from 0 to ``total``, one increment
        # per step, so per-update overhead dominates the measurement.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now