perfrevflogwrite: clear revlog cache between each write...
Boris Feld
r41013:21a9cace default
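The hunk below contains only unchanged context from the top of contrib/perf.py; the perfrevlogwrite change named in the commit title lands further down the file. As a rough illustration of the idea (a sketch, not the actual change), the following shows how the perf framework's setup hook, the same pattern perftags and perfbookmarks use in the context below, can drop a revlog's caches before each timed run so successive iterations do not reuse warm caches. The command name perfexamplewrite and the helper _clearrevlogcaches are invented for illustration and are not part of this patch.

# Hypothetical sketch only: it relies on helpers defined in perf.py below
# (command, formatteropts, _byteskwargs, gettimer, util.safehasattr).
def _clearrevlogcaches(rl):
    # modern revlogs expose clearcaches(); mirror the guard used by
    # clearcaches(cl) later in this file
    if util.safehasattr(rl, b'clearcaches'):
        rl.clearcaches()

@command(b'perfexamplewrite', formatteropts)
def perfexamplewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    rl = repo.changelog
    def setup():
        # runs before every timed iteration, keeping each run cold
        _clearrevlogcaches(rl)
    def d():
        rl.rev(rl.node(0))  # stand-in for the benchmarked revlog work
    timer(d, setup=setup)
    fm.end()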
@@ -1,2656 +1,2662
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 # "historical portability" policy of perf.py:
4 # "historical portability" policy of perf.py:
5 #
5 #
6 # We have to do:
6 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
10 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
11 # version as possible
12 #
12 #
13 # We have to do, if possible with reasonable cost:
13 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
14 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
15 # with early Mercurial
16 #
16 #
17 # We don't have to do:
17 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
18 # - make perf command for recent feature work correctly with early
19 # Mercurial
19 # Mercurial
20
20
21 from __future__ import absolute_import
21 from __future__ import absolute_import
22 import contextlib
22 import contextlib
23 import functools
23 import functools
24 import gc
24 import gc
25 import os
25 import os
26 import random
26 import random
27 import shutil
27 import shutil
28 import struct
28 import struct
29 import sys
29 import sys
30 import tempfile
30 import tempfile
31 import threading
31 import threading
32 import time
32 import time
33 from mercurial import (
33 from mercurial import (
34 changegroup,
34 changegroup,
35 cmdutil,
35 cmdutil,
36 commands,
36 commands,
37 copies,
37 copies,
38 error,
38 error,
39 extensions,
39 extensions,
40 hg,
40 hg,
41 mdiff,
41 mdiff,
42 merge,
42 merge,
43 revlog,
43 revlog,
44 util,
44 util,
45 )
45 )
46
46
47 # for "historical portability":
47 # for "historical portability":
48 # try to import modules separately (in dict order), and ignore
48 # try to import modules separately (in dict order), and ignore
49 # failure, because these aren't available with early Mercurial
49 # failure, because these aren't available with early Mercurial
50 try:
50 try:
51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 except ImportError:
52 except ImportError:
53 pass
53 pass
54 try:
54 try:
55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 except ImportError:
56 except ImportError:
57 pass
57 pass
58 try:
58 try:
59 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 dir(registrar) # forcibly load it
60 dir(registrar) # forcibly load it
61 except ImportError:
61 except ImportError:
62 registrar = None
62 registrar = None
63 try:
63 try:
64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 except ImportError:
65 except ImportError:
66 pass
66 pass
67 try:
67 try:
68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 except ImportError:
69 except ImportError:
70 pass
70 pass
71 try:
71 try:
72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 except ImportError:
73 except ImportError:
74 pass
74 pass
75
75
76
76
77 def identity(a):
77 def identity(a):
78 return a
78 return a
79
79
80 try:
80 try:
81 from mercurial import pycompat
81 from mercurial import pycompat
82 getargspec = pycompat.getargspec # added to module after 4.5
82 getargspec = pycompat.getargspec # added to module after 4.5
83 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
83 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
84 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
84 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
85 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
85 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
86 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
86 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
87 if pycompat.ispy3:
87 if pycompat.ispy3:
88 _maxint = sys.maxsize # per py3 docs for replacing maxint
88 _maxint = sys.maxsize # per py3 docs for replacing maxint
89 else:
89 else:
90 _maxint = sys.maxint
90 _maxint = sys.maxint
91 except (ImportError, AttributeError):
91 except (ImportError, AttributeError):
92 import inspect
92 import inspect
93 getargspec = inspect.getargspec
93 getargspec = inspect.getargspec
94 _byteskwargs = identity
94 _byteskwargs = identity
95 fsencode = identity # no py3 support
95 fsencode = identity # no py3 support
96 _maxint = sys.maxint # no py3 support
96 _maxint = sys.maxint # no py3 support
97 _sysstr = lambda x: x # no py3 support
97 _sysstr = lambda x: x # no py3 support
98 _xrange = xrange
98 _xrange = xrange
99
99
100 try:
100 try:
101 # 4.7+
101 # 4.7+
102 queue = pycompat.queue.Queue
102 queue = pycompat.queue.Queue
103 except (AttributeError, ImportError):
103 except (AttributeError, ImportError):
104 # <4.7.
104 # <4.7.
105 try:
105 try:
106 queue = pycompat.queue
106 queue = pycompat.queue
107 except (AttributeError, ImportError):
107 except (AttributeError, ImportError):
108 queue = util.queue
108 queue = util.queue
109
109
110 try:
110 try:
111 from mercurial import logcmdutil
111 from mercurial import logcmdutil
112 makelogtemplater = logcmdutil.maketemplater
112 makelogtemplater = logcmdutil.maketemplater
113 except (AttributeError, ImportError):
113 except (AttributeError, ImportError):
114 try:
114 try:
115 makelogtemplater = cmdutil.makelogtemplater
115 makelogtemplater = cmdutil.makelogtemplater
116 except (AttributeError, ImportError):
116 except (AttributeError, ImportError):
117 makelogtemplater = None
117 makelogtemplater = None
118
118
119 # for "historical portability":
119 # for "historical portability":
120 # define util.safehasattr forcibly, because util.safehasattr has been
120 # define util.safehasattr forcibly, because util.safehasattr has been
121 # available since 1.9.3 (or 94b200a11cf7)
121 # available since 1.9.3 (or 94b200a11cf7)
122 _undefined = object()
122 _undefined = object()
123 def safehasattr(thing, attr):
123 def safehasattr(thing, attr):
124 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
124 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
125 setattr(util, 'safehasattr', safehasattr)
125 setattr(util, 'safehasattr', safehasattr)
126
126
127 # for "historical portability":
127 # for "historical portability":
128 # define util.timer forcibly, because util.timer has been available
128 # define util.timer forcibly, because util.timer has been available
129 # since ae5d60bb70c9
129 # since ae5d60bb70c9
130 if safehasattr(time, 'perf_counter'):
130 if safehasattr(time, 'perf_counter'):
131 util.timer = time.perf_counter
131 util.timer = time.perf_counter
132 elif os.name == b'nt':
132 elif os.name == b'nt':
133 util.timer = time.clock
133 util.timer = time.clock
134 else:
134 else:
135 util.timer = time.time
135 util.timer = time.time
136
136
137 # for "historical portability":
137 # for "historical portability":
138 # use locally defined empty option list, if formatteropts isn't
138 # use locally defined empty option list, if formatteropts isn't
139 # available, because commands.formatteropts has been available since
139 # available, because commands.formatteropts has been available since
140 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
140 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
141 # available since 2.2 (or ae5f92e154d3)
141 # available since 2.2 (or ae5f92e154d3)
142 formatteropts = getattr(cmdutil, "formatteropts",
142 formatteropts = getattr(cmdutil, "formatteropts",
143 getattr(commands, "formatteropts", []))
143 getattr(commands, "formatteropts", []))
144
144
145 # for "historical portability":
145 # for "historical portability":
146 # use locally defined option list, if debugrevlogopts isn't available,
146 # use locally defined option list, if debugrevlogopts isn't available,
147 # because commands.debugrevlogopts has been available since 3.7 (or
147 # because commands.debugrevlogopts has been available since 3.7 (or
148 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
148 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
149 # since 1.9 (or a79fea6b3e77).
149 # since 1.9 (or a79fea6b3e77).
150 revlogopts = getattr(cmdutil, "debugrevlogopts",
150 revlogopts = getattr(cmdutil, "debugrevlogopts",
151 getattr(commands, "debugrevlogopts", [
151 getattr(commands, "debugrevlogopts", [
152 (b'c', b'changelog', False, (b'open changelog')),
152 (b'c', b'changelog', False, (b'open changelog')),
153 (b'm', b'manifest', False, (b'open manifest')),
153 (b'm', b'manifest', False, (b'open manifest')),
154 (b'', b'dir', False, (b'open directory manifest')),
154 (b'', b'dir', False, (b'open directory manifest')),
155 ]))
155 ]))
156
156
157 cmdtable = {}
157 cmdtable = {}
158
158
159 # for "historical portability":
159 # for "historical portability":
160 # define parsealiases locally, because cmdutil.parsealiases has been
160 # define parsealiases locally, because cmdutil.parsealiases has been
161 # available since 1.5 (or 6252852b4332)
161 # available since 1.5 (or 6252852b4332)
162 def parsealiases(cmd):
162 def parsealiases(cmd):
163 return cmd.split(b"|")
163 return cmd.split(b"|")
164
164
165 if safehasattr(registrar, 'command'):
165 if safehasattr(registrar, 'command'):
166 command = registrar.command(cmdtable)
166 command = registrar.command(cmdtable)
167 elif safehasattr(cmdutil, 'command'):
167 elif safehasattr(cmdutil, 'command'):
168 command = cmdutil.command(cmdtable)
168 command = cmdutil.command(cmdtable)
169 if b'norepo' not in getargspec(command).args:
169 if b'norepo' not in getargspec(command).args:
170 # for "historical portability":
170 # for "historical portability":
171 # wrap original cmdutil.command, because "norepo" option has
171 # wrap original cmdutil.command, because "norepo" option has
172 # been available since 3.1 (or 75a96326cecb)
172 # been available since 3.1 (or 75a96326cecb)
173 _command = command
173 _command = command
174 def command(name, options=(), synopsis=None, norepo=False):
174 def command(name, options=(), synopsis=None, norepo=False):
175 if norepo:
175 if norepo:
176 commands.norepo += b' %s' % b' '.join(parsealiases(name))
176 commands.norepo += b' %s' % b' '.join(parsealiases(name))
177 return _command(name, list(options), synopsis)
177 return _command(name, list(options), synopsis)
178 else:
178 else:
179 # for "historical portability":
179 # for "historical portability":
180 # define "@command" annotation locally, because cmdutil.command
180 # define "@command" annotation locally, because cmdutil.command
181 # has been available since 1.9 (or 2daa5179e73f)
181 # has been available since 1.9 (or 2daa5179e73f)
182 def command(name, options=(), synopsis=None, norepo=False):
182 def command(name, options=(), synopsis=None, norepo=False):
183 def decorator(func):
183 def decorator(func):
184 if synopsis:
184 if synopsis:
185 cmdtable[name] = func, list(options), synopsis
185 cmdtable[name] = func, list(options), synopsis
186 else:
186 else:
187 cmdtable[name] = func, list(options)
187 cmdtable[name] = func, list(options)
188 if norepo:
188 if norepo:
189 commands.norepo += b' %s' % b' '.join(parsealiases(name))
189 commands.norepo += b' %s' % b' '.join(parsealiases(name))
190 return func
190 return func
191 return decorator
191 return decorator
192
192
193 try:
193 try:
194 import mercurial.registrar
194 import mercurial.registrar
195 import mercurial.configitems
195 import mercurial.configitems
196 configtable = {}
196 configtable = {}
197 configitem = mercurial.registrar.configitem(configtable)
197 configitem = mercurial.registrar.configitem(configtable)
198 configitem(b'perf', b'presleep',
198 configitem(b'perf', b'presleep',
199 default=mercurial.configitems.dynamicdefault,
199 default=mercurial.configitems.dynamicdefault,
200 )
200 )
201 configitem(b'perf', b'stub',
201 configitem(b'perf', b'stub',
202 default=mercurial.configitems.dynamicdefault,
202 default=mercurial.configitems.dynamicdefault,
203 )
203 )
204 configitem(b'perf', b'parentscount',
204 configitem(b'perf', b'parentscount',
205 default=mercurial.configitems.dynamicdefault,
205 default=mercurial.configitems.dynamicdefault,
206 )
206 )
207 configitem(b'perf', b'all-timing',
207 configitem(b'perf', b'all-timing',
208 default=mercurial.configitems.dynamicdefault,
208 default=mercurial.configitems.dynamicdefault,
209 )
209 )
210 except (ImportError, AttributeError):
210 except (ImportError, AttributeError):
211 pass
211 pass
212
212
213 def getlen(ui):
213 def getlen(ui):
214 if ui.configbool(b"perf", b"stub", False):
214 if ui.configbool(b"perf", b"stub", False):
215 return lambda x: 1
215 return lambda x: 1
216 return len
216 return len
217
217
218 def gettimer(ui, opts=None):
218 def gettimer(ui, opts=None):
219 """return a timer function and formatter: (timer, formatter)
219 """return a timer function and formatter: (timer, formatter)
220
220
221 This function exists to gather the creation of formatter in a single
221 This function exists to gather the creation of formatter in a single
222 place instead of duplicating it in all performance commands."""
222 place instead of duplicating it in all performance commands."""
223
223
224 # enforce an idle period before execution to counteract power management
224 # enforce an idle period before execution to counteract power management
225 # experimental config: perf.presleep
225 # experimental config: perf.presleep
226 time.sleep(getint(ui, b"perf", b"presleep", 1))
226 time.sleep(getint(ui, b"perf", b"presleep", 1))
227
227
228 if opts is None:
228 if opts is None:
229 opts = {}
229 opts = {}
230 # redirect all to stderr unless buffer api is in use
230 # redirect all to stderr unless buffer api is in use
231 if not ui._buffers:
231 if not ui._buffers:
232 ui = ui.copy()
232 ui = ui.copy()
233 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
233 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
234 if uifout:
234 if uifout:
235 # for "historical portability":
235 # for "historical portability":
236 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
236 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
237 uifout.set(ui.ferr)
237 uifout.set(ui.ferr)
238
238
239 # get a formatter
239 # get a formatter
240 uiformatter = getattr(ui, 'formatter', None)
240 uiformatter = getattr(ui, 'formatter', None)
241 if uiformatter:
241 if uiformatter:
242 fm = uiformatter(b'perf', opts)
242 fm = uiformatter(b'perf', opts)
243 else:
243 else:
244 # for "historical portability":
244 # for "historical portability":
245 # define formatter locally, because ui.formatter has been
245 # define formatter locally, because ui.formatter has been
246 # available since 2.2 (or ae5f92e154d3)
246 # available since 2.2 (or ae5f92e154d3)
247 from mercurial import node
247 from mercurial import node
248 class defaultformatter(object):
248 class defaultformatter(object):
249 """Minimized composition of baseformatter and plainformatter
249 """Minimized composition of baseformatter and plainformatter
250 """
250 """
251 def __init__(self, ui, topic, opts):
251 def __init__(self, ui, topic, opts):
252 self._ui = ui
252 self._ui = ui
253 if ui.debugflag:
253 if ui.debugflag:
254 self.hexfunc = node.hex
254 self.hexfunc = node.hex
255 else:
255 else:
256 self.hexfunc = node.short
256 self.hexfunc = node.short
257 def __nonzero__(self):
257 def __nonzero__(self):
258 return False
258 return False
259 __bool__ = __nonzero__
259 __bool__ = __nonzero__
260 def startitem(self):
260 def startitem(self):
261 pass
261 pass
262 def data(self, **data):
262 def data(self, **data):
263 pass
263 pass
264 def write(self, fields, deftext, *fielddata, **opts):
264 def write(self, fields, deftext, *fielddata, **opts):
265 self._ui.write(deftext % fielddata, **opts)
265 self._ui.write(deftext % fielddata, **opts)
266 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
266 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
267 if cond:
267 if cond:
268 self._ui.write(deftext % fielddata, **opts)
268 self._ui.write(deftext % fielddata, **opts)
269 def plain(self, text, **opts):
269 def plain(self, text, **opts):
270 self._ui.write(text, **opts)
270 self._ui.write(text, **opts)
271 def end(self):
271 def end(self):
272 pass
272 pass
273 fm = defaultformatter(ui, b'perf', opts)
273 fm = defaultformatter(ui, b'perf', opts)
274
274
275 # stub function, runs code only once instead of in a loop
275 # stub function, runs code only once instead of in a loop
276 # experimental config: perf.stub
276 # experimental config: perf.stub
277 if ui.configbool(b"perf", b"stub", False):
277 if ui.configbool(b"perf", b"stub", False):
278 return functools.partial(stub_timer, fm), fm
278 return functools.partial(stub_timer, fm), fm
279
279
280 # experimental config: perf.all-timing
280 # experimental config: perf.all-timing
281 displayall = ui.configbool(b"perf", b"all-timing", False)
281 displayall = ui.configbool(b"perf", b"all-timing", False)
282 return functools.partial(_timer, fm, displayall=displayall), fm
282 return functools.partial(_timer, fm, displayall=displayall), fm
283
283
284 def stub_timer(fm, func, setup=None, title=None):
284 def stub_timer(fm, func, setup=None, title=None):
285 if setup is not None:
285 if setup is not None:
286 setup()
286 setup()
287 func()
287 func()
288
288
289 @contextlib.contextmanager
289 @contextlib.contextmanager
290 def timeone():
290 def timeone():
291 r = []
291 r = []
292 ostart = os.times()
292 ostart = os.times()
293 cstart = util.timer()
293 cstart = util.timer()
294 yield r
294 yield r
295 cstop = util.timer()
295 cstop = util.timer()
296 ostop = os.times()
296 ostop = os.times()
297 a, b = ostart, ostop
297 a, b = ostart, ostop
298 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
298 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
299
299
300 def _timer(fm, func, setup=None, title=None, displayall=False):
300 def _timer(fm, func, setup=None, title=None, displayall=False):
301 gc.collect()
301 gc.collect()
302 results = []
302 results = []
303 begin = util.timer()
303 begin = util.timer()
304 count = 0
304 count = 0
305 while True:
305 while True:
306 if setup is not None:
306 if setup is not None:
307 setup()
307 setup()
308 with timeone() as item:
308 with timeone() as item:
309 r = func()
309 r = func()
310 count += 1
310 count += 1
311 results.append(item[0])
311 results.append(item[0])
312 cstop = util.timer()
312 cstop = util.timer()
313 if cstop - begin > 3 and count >= 100:
313 if cstop - begin > 3 and count >= 100:
314 break
314 break
315 if cstop - begin > 10 and count >= 3:
315 if cstop - begin > 10 and count >= 3:
316 break
316 break
317
317
318 formatone(fm, results, title=title, result=r,
318 formatone(fm, results, title=title, result=r,
319 displayall=displayall)
319 displayall=displayall)
320
320
321 def formatone(fm, timings, title=None, result=None, displayall=False):
321 def formatone(fm, timings, title=None, result=None, displayall=False):
322
322
323 count = len(timings)
323 count = len(timings)
324
324
325 fm.startitem()
325 fm.startitem()
326
326
327 if title:
327 if title:
328 fm.write(b'title', b'! %s\n', title)
328 fm.write(b'title', b'! %s\n', title)
329 if result:
329 if result:
330 fm.write(b'result', b'! result: %s\n', result)
330 fm.write(b'result', b'! result: %s\n', result)
331 def display(role, entry):
331 def display(role, entry):
332 prefix = b''
332 prefix = b''
333 if role != b'best':
333 if role != b'best':
334 prefix = b'%s.' % role
334 prefix = b'%s.' % role
335 fm.plain(b'!')
335 fm.plain(b'!')
336 fm.write(prefix + b'wall', b' wall %f', entry[0])
336 fm.write(prefix + b'wall', b' wall %f', entry[0])
337 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
337 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
338 fm.write(prefix + b'user', b' user %f', entry[1])
338 fm.write(prefix + b'user', b' user %f', entry[1])
339 fm.write(prefix + b'sys', b' sys %f', entry[2])
339 fm.write(prefix + b'sys', b' sys %f', entry[2])
340 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
340 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
341 fm.plain(b'\n')
341 fm.plain(b'\n')
342 timings.sort()
342 timings.sort()
343 min_val = timings[0]
343 min_val = timings[0]
344 display(b'best', min_val)
344 display(b'best', min_val)
345 if displayall:
345 if displayall:
346 max_val = timings[-1]
346 max_val = timings[-1]
347 display(b'max', max_val)
347 display(b'max', max_val)
348 avg = tuple([sum(x) / count for x in zip(*timings)])
348 avg = tuple([sum(x) / count for x in zip(*timings)])
349 display(b'avg', avg)
349 display(b'avg', avg)
350 median = timings[len(timings) // 2]
350 median = timings[len(timings) // 2]
351 display(b'median', median)
351 display(b'median', median)
352
352
353 # utilities for historical portability
353 # utilities for historical portability
354
354
355 def getint(ui, section, name, default):
355 def getint(ui, section, name, default):
356 # for "historical portability":
356 # for "historical portability":
357 # ui.configint has been available since 1.9 (or fa2b596db182)
357 # ui.configint has been available since 1.9 (or fa2b596db182)
358 v = ui.config(section, name, None)
358 v = ui.config(section, name, None)
359 if v is None:
359 if v is None:
360 return default
360 return default
361 try:
361 try:
362 return int(v)
362 return int(v)
363 except ValueError:
363 except ValueError:
364 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
364 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
365 % (section, name, v))
365 % (section, name, v))
366
366
367 def safeattrsetter(obj, name, ignoremissing=False):
367 def safeattrsetter(obj, name, ignoremissing=False):
368 """Ensure that 'obj' has 'name' attribute before subsequent setattr
368 """Ensure that 'obj' has 'name' attribute before subsequent setattr
369
369
370 This function is aborted, if 'obj' doesn't have 'name' attribute
370 This function is aborted, if 'obj' doesn't have 'name' attribute
371 at runtime. This avoids overlooking removal of an attribute, which
371 at runtime. This avoids overlooking removal of an attribute, which
372 breaks assumption of performance measurement, in the future.
372 breaks assumption of performance measurement, in the future.
373
373
374 This function returns the object to (1) assign a new value, and
374 This function returns the object to (1) assign a new value, and
375 (2) restore an original value to the attribute.
375 (2) restore an original value to the attribute.
376
376
377 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
377 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
378 abortion, and this function returns None. This is useful to
378 abortion, and this function returns None. This is useful to
379 examine an attribute, which isn't ensured in all Mercurial
379 examine an attribute, which isn't ensured in all Mercurial
380 versions.
380 versions.
381 """
381 """
382 if not util.safehasattr(obj, name):
382 if not util.safehasattr(obj, name):
383 if ignoremissing:
383 if ignoremissing:
384 return None
384 return None
385 raise error.Abort((b"missing attribute %s of %s might break assumption"
385 raise error.Abort((b"missing attribute %s of %s might break assumption"
386 b" of performance measurement") % (name, obj))
386 b" of performance measurement") % (name, obj))
387
387
388 origvalue = getattr(obj, _sysstr(name))
388 origvalue = getattr(obj, _sysstr(name))
389 class attrutil(object):
389 class attrutil(object):
390 def set(self, newvalue):
390 def set(self, newvalue):
391 setattr(obj, _sysstr(name), newvalue)
391 setattr(obj, _sysstr(name), newvalue)
392 def restore(self):
392 def restore(self):
393 setattr(obj, _sysstr(name), origvalue)
393 setattr(obj, _sysstr(name), origvalue)
394
394
395 return attrutil()
395 return attrutil()
396
396
397 # utilities to examine each internal API changes
397 # utilities to examine each internal API changes
398
398
399 def getbranchmapsubsettable():
399 def getbranchmapsubsettable():
400 # for "historical portability":
400 # for "historical portability":
401 # subsettable is defined in:
401 # subsettable is defined in:
402 # - branchmap since 2.9 (or 175c6fd8cacc)
402 # - branchmap since 2.9 (or 175c6fd8cacc)
403 # - repoview since 2.5 (or 59a9f18d4587)
403 # - repoview since 2.5 (or 59a9f18d4587)
404 for mod in (branchmap, repoview):
404 for mod in (branchmap, repoview):
405 subsettable = getattr(mod, 'subsettable', None)
405 subsettable = getattr(mod, 'subsettable', None)
406 if subsettable:
406 if subsettable:
407 return subsettable
407 return subsettable
408
408
409 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
409 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
410 # branchmap and repoview modules exist, but subsettable attribute
410 # branchmap and repoview modules exist, but subsettable attribute
411 # doesn't)
411 # doesn't)
412 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
412 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
413 hint=b"use 2.5 or later")
413 hint=b"use 2.5 or later")
414
414
415 def getsvfs(repo):
415 def getsvfs(repo):
416 """Return appropriate object to access files under .hg/store
416 """Return appropriate object to access files under .hg/store
417 """
417 """
418 # for "historical portability":
418 # for "historical portability":
419 # repo.svfs has been available since 2.3 (or 7034365089bf)
419 # repo.svfs has been available since 2.3 (or 7034365089bf)
420 svfs = getattr(repo, 'svfs', None)
420 svfs = getattr(repo, 'svfs', None)
421 if svfs:
421 if svfs:
422 return svfs
422 return svfs
423 else:
423 else:
424 return getattr(repo, 'sopener')
424 return getattr(repo, 'sopener')
425
425
426 def getvfs(repo):
426 def getvfs(repo):
427 """Return appropriate object to access files under .hg
427 """Return appropriate object to access files under .hg
428 """
428 """
429 # for "historical portability":
429 # for "historical portability":
430 # repo.vfs has been available since 2.3 (or 7034365089bf)
430 # repo.vfs has been available since 2.3 (or 7034365089bf)
431 vfs = getattr(repo, 'vfs', None)
431 vfs = getattr(repo, 'vfs', None)
432 if vfs:
432 if vfs:
433 return vfs
433 return vfs
434 else:
434 else:
435 return getattr(repo, 'opener')
435 return getattr(repo, 'opener')
436
436
437 def repocleartagscachefunc(repo):
437 def repocleartagscachefunc(repo):
438 """Return the function to clear tags cache according to repo internal API
438 """Return the function to clear tags cache according to repo internal API
439 """
439 """
440 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
440 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
441 # in this case, setattr(repo, '_tagscache', None) or so isn't
441 # in this case, setattr(repo, '_tagscache', None) or so isn't
442 # correct way to clear tags cache, because existing code paths
442 # correct way to clear tags cache, because existing code paths
443 # expect _tagscache to be a structured object.
443 # expect _tagscache to be a structured object.
444 def clearcache():
444 def clearcache():
445 # _tagscache has been filteredpropertycache since 2.5 (or
445 # _tagscache has been filteredpropertycache since 2.5 (or
446 # 98c867ac1330), and delattr() can't work in such case
446 # 98c867ac1330), and delattr() can't work in such case
447 if b'_tagscache' in vars(repo):
447 if b'_tagscache' in vars(repo):
448 del repo.__dict__[b'_tagscache']
448 del repo.__dict__[b'_tagscache']
449 return clearcache
449 return clearcache
450
450
451 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
451 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
452 if repotags: # since 1.4 (or 5614a628d173)
452 if repotags: # since 1.4 (or 5614a628d173)
453 return lambda : repotags.set(None)
453 return lambda : repotags.set(None)
454
454
455 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
455 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
456 if repotagscache: # since 0.6 (or d7df759d0e97)
456 if repotagscache: # since 0.6 (or d7df759d0e97)
457 return lambda : repotagscache.set(None)
457 return lambda : repotagscache.set(None)
458
458
459 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
459 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
460 # this point, but it isn't so problematic, because:
460 # this point, but it isn't so problematic, because:
461 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
461 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
462 # in perftags() causes failure soon
462 # in perftags() causes failure soon
463 # - perf.py itself has been available since 1.1 (or eb240755386d)
463 # - perf.py itself has been available since 1.1 (or eb240755386d)
464 raise error.Abort((b"tags API of this hg command is unknown"))
464 raise error.Abort((b"tags API of this hg command is unknown"))
465
465
466 # utilities to clear cache
466 # utilities to clear cache
467
467
468 def clearfilecache(obj, attrname):
468 def clearfilecache(obj, attrname):
469 unfiltered = getattr(obj, 'unfiltered', None)
469 unfiltered = getattr(obj, 'unfiltered', None)
470 if unfiltered is not None:
470 if unfiltered is not None:
471 obj = obj.unfiltered()
471 obj = obj.unfiltered()
472 if attrname in vars(obj):
472 if attrname in vars(obj):
473 delattr(obj, attrname)
473 delattr(obj, attrname)
474 obj._filecache.pop(attrname, None)
474 obj._filecache.pop(attrname, None)
475
475
476 def clearchangelog(repo):
476 def clearchangelog(repo):
477 if repo is not repo.unfiltered():
477 if repo is not repo.unfiltered():
478 object.__setattr__(repo, r'_clcachekey', None)
478 object.__setattr__(repo, r'_clcachekey', None)
479 object.__setattr__(repo, r'_clcache', None)
479 object.__setattr__(repo, r'_clcache', None)
480 clearfilecache(repo.unfiltered(), 'changelog')
480 clearfilecache(repo.unfiltered(), 'changelog')
481
481
482 # perf commands
482 # perf commands
483
483
484 @command(b'perfwalk', formatteropts)
484 @command(b'perfwalk', formatteropts)
485 def perfwalk(ui, repo, *pats, **opts):
485 def perfwalk(ui, repo, *pats, **opts):
486 opts = _byteskwargs(opts)
486 opts = _byteskwargs(opts)
487 timer, fm = gettimer(ui, opts)
487 timer, fm = gettimer(ui, opts)
488 m = scmutil.match(repo[None], pats, {})
488 m = scmutil.match(repo[None], pats, {})
489 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
489 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
490 ignored=False))))
490 ignored=False))))
491 fm.end()
491 fm.end()
492
492
493 @command(b'perfannotate', formatteropts)
493 @command(b'perfannotate', formatteropts)
494 def perfannotate(ui, repo, f, **opts):
494 def perfannotate(ui, repo, f, **opts):
495 opts = _byteskwargs(opts)
495 opts = _byteskwargs(opts)
496 timer, fm = gettimer(ui, opts)
496 timer, fm = gettimer(ui, opts)
497 fc = repo[b'.'][f]
497 fc = repo[b'.'][f]
498 timer(lambda: len(fc.annotate(True)))
498 timer(lambda: len(fc.annotate(True)))
499 fm.end()
499 fm.end()
500
500
501 @command(b'perfstatus',
501 @command(b'perfstatus',
502 [(b'u', b'unknown', False,
502 [(b'u', b'unknown', False,
503 b'ask status to look for unknown files')] + formatteropts)
503 b'ask status to look for unknown files')] + formatteropts)
504 def perfstatus(ui, repo, **opts):
504 def perfstatus(ui, repo, **opts):
505 opts = _byteskwargs(opts)
505 opts = _byteskwargs(opts)
506 #m = match.always(repo.root, repo.getcwd())
506 #m = match.always(repo.root, repo.getcwd())
507 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
507 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
508 # False))))
508 # False))))
509 timer, fm = gettimer(ui, opts)
509 timer, fm = gettimer(ui, opts)
510 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
510 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
511 fm.end()
511 fm.end()
512
512
513 @command(b'perfaddremove', formatteropts)
513 @command(b'perfaddremove', formatteropts)
514 def perfaddremove(ui, repo, **opts):
514 def perfaddremove(ui, repo, **opts):
515 opts = _byteskwargs(opts)
515 opts = _byteskwargs(opts)
516 timer, fm = gettimer(ui, opts)
516 timer, fm = gettimer(ui, opts)
517 try:
517 try:
518 oldquiet = repo.ui.quiet
518 oldquiet = repo.ui.quiet
519 repo.ui.quiet = True
519 repo.ui.quiet = True
520 matcher = scmutil.match(repo[None])
520 matcher = scmutil.match(repo[None])
521 opts[b'dry_run'] = True
521 opts[b'dry_run'] = True
522 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
522 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
523 finally:
523 finally:
524 repo.ui.quiet = oldquiet
524 repo.ui.quiet = oldquiet
525 fm.end()
525 fm.end()
526
526
527 def clearcaches(cl):
527 def clearcaches(cl):
528 # behave somewhat consistently across internal API changes
528 # behave somewhat consistently across internal API changes
529 if util.safehasattr(cl, b'clearcaches'):
529 if util.safehasattr(cl, b'clearcaches'):
530 cl.clearcaches()
530 cl.clearcaches()
531 elif util.safehasattr(cl, b'_nodecache'):
531 elif util.safehasattr(cl, b'_nodecache'):
532 from mercurial.node import nullid, nullrev
532 from mercurial.node import nullid, nullrev
533 cl._nodecache = {nullid: nullrev}
533 cl._nodecache = {nullid: nullrev}
534 cl._nodepos = None
534 cl._nodepos = None
535
535
536 @command(b'perfheads', formatteropts)
536 @command(b'perfheads', formatteropts)
537 def perfheads(ui, repo, **opts):
537 def perfheads(ui, repo, **opts):
538 opts = _byteskwargs(opts)
538 opts = _byteskwargs(opts)
539 timer, fm = gettimer(ui, opts)
539 timer, fm = gettimer(ui, opts)
540 cl = repo.changelog
540 cl = repo.changelog
541 def d():
541 def d():
542 len(cl.headrevs())
542 len(cl.headrevs())
543 clearcaches(cl)
543 clearcaches(cl)
544 timer(d)
544 timer(d)
545 fm.end()
545 fm.end()
546
546
547 @command(b'perftags', formatteropts+
547 @command(b'perftags', formatteropts+
548 [
548 [
549 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
549 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
550 ])
550 ])
551 def perftags(ui, repo, **opts):
551 def perftags(ui, repo, **opts):
552 opts = _byteskwargs(opts)
552 opts = _byteskwargs(opts)
553 timer, fm = gettimer(ui, opts)
553 timer, fm = gettimer(ui, opts)
554 repocleartagscache = repocleartagscachefunc(repo)
554 repocleartagscache = repocleartagscachefunc(repo)
555 clearrevlogs = opts[b'clear_revlogs']
555 clearrevlogs = opts[b'clear_revlogs']
556 def s():
556 def s():
557 if clearrevlogs:
557 if clearrevlogs:
558 clearchangelog(repo)
558 clearchangelog(repo)
559 clearfilecache(repo.unfiltered(), 'manifest')
559 clearfilecache(repo.unfiltered(), 'manifest')
560 repocleartagscache()
560 repocleartagscache()
561 def t():
561 def t():
562 return len(repo.tags())
562 return len(repo.tags())
563 timer(t, setup=s)
563 timer(t, setup=s)
564 fm.end()
564 fm.end()
565
565
566 @command(b'perfancestors', formatteropts)
566 @command(b'perfancestors', formatteropts)
567 def perfancestors(ui, repo, **opts):
567 def perfancestors(ui, repo, **opts):
568 opts = _byteskwargs(opts)
568 opts = _byteskwargs(opts)
569 timer, fm = gettimer(ui, opts)
569 timer, fm = gettimer(ui, opts)
570 heads = repo.changelog.headrevs()
570 heads = repo.changelog.headrevs()
571 def d():
571 def d():
572 for a in repo.changelog.ancestors(heads):
572 for a in repo.changelog.ancestors(heads):
573 pass
573 pass
574 timer(d)
574 timer(d)
575 fm.end()
575 fm.end()
576
576
577 @command(b'perfancestorset', formatteropts)
577 @command(b'perfancestorset', formatteropts)
578 def perfancestorset(ui, repo, revset, **opts):
578 def perfancestorset(ui, repo, revset, **opts):
579 opts = _byteskwargs(opts)
579 opts = _byteskwargs(opts)
580 timer, fm = gettimer(ui, opts)
580 timer, fm = gettimer(ui, opts)
581 revs = repo.revs(revset)
581 revs = repo.revs(revset)
582 heads = repo.changelog.headrevs()
582 heads = repo.changelog.headrevs()
583 def d():
583 def d():
584 s = repo.changelog.ancestors(heads)
584 s = repo.changelog.ancestors(heads)
585 for rev in revs:
585 for rev in revs:
586 rev in s
586 rev in s
587 timer(d)
587 timer(d)
588 fm.end()
588 fm.end()
589
589
590 @command(b'perfdiscovery', formatteropts, b'PATH')
590 @command(b'perfdiscovery', formatteropts, b'PATH')
591 def perfdiscovery(ui, repo, path, **opts):
591 def perfdiscovery(ui, repo, path, **opts):
592 """benchmark discovery between local repo and the peer at given path
592 """benchmark discovery between local repo and the peer at given path
593 """
593 """
594 repos = [repo, None]
594 repos = [repo, None]
595 timer, fm = gettimer(ui, opts)
595 timer, fm = gettimer(ui, opts)
596 path = ui.expandpath(path)
596 path = ui.expandpath(path)
597
597
598 def s():
598 def s():
599 repos[1] = hg.peer(ui, opts, path)
599 repos[1] = hg.peer(ui, opts, path)
600 def d():
600 def d():
601 setdiscovery.findcommonheads(ui, *repos)
601 setdiscovery.findcommonheads(ui, *repos)
602 timer(d, setup=s)
602 timer(d, setup=s)
603 fm.end()
603 fm.end()
604
604
605 @command(b'perfbookmarks', formatteropts +
605 @command(b'perfbookmarks', formatteropts +
606 [
606 [
607 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
607 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
608 ])
608 ])
609 def perfbookmarks(ui, repo, **opts):
609 def perfbookmarks(ui, repo, **opts):
610 """benchmark parsing bookmarks from disk to memory"""
610 """benchmark parsing bookmarks from disk to memory"""
611 opts = _byteskwargs(opts)
611 opts = _byteskwargs(opts)
612 timer, fm = gettimer(ui, opts)
612 timer, fm = gettimer(ui, opts)
613
613
614 clearrevlogs = opts[b'clear_revlogs']
614 clearrevlogs = opts[b'clear_revlogs']
615 def s():
615 def s():
616 if clearrevlogs:
616 if clearrevlogs:
617 clearchangelog(repo)
617 clearchangelog(repo)
618 clearfilecache(repo, b'_bookmarks')
618 clearfilecache(repo, b'_bookmarks')
619 def d():
619 def d():
620 repo._bookmarks
620 repo._bookmarks
621 timer(d, setup=s)
621 timer(d, setup=s)
622 fm.end()
622 fm.end()
623
623
624 @command(b'perfbundleread', formatteropts, b'BUNDLE')
624 @command(b'perfbundleread', formatteropts, b'BUNDLE')
625 def perfbundleread(ui, repo, bundlepath, **opts):
625 def perfbundleread(ui, repo, bundlepath, **opts):
626 """Benchmark reading of bundle files.
626 """Benchmark reading of bundle files.
627
627
628 This command is meant to isolate the I/O part of bundle reading as
628 This command is meant to isolate the I/O part of bundle reading as
629 much as possible.
629 much as possible.
630 """
630 """
631 from mercurial import (
631 from mercurial import (
632 bundle2,
632 bundle2,
633 exchange,
633 exchange,
634 streamclone,
634 streamclone,
635 )
635 )
636
636
637 opts = _byteskwargs(opts)
637 opts = _byteskwargs(opts)
638
638
639 def makebench(fn):
639 def makebench(fn):
640 def run():
640 def run():
641 with open(bundlepath, b'rb') as fh:
641 with open(bundlepath, b'rb') as fh:
642 bundle = exchange.readbundle(ui, fh, bundlepath)
642 bundle = exchange.readbundle(ui, fh, bundlepath)
643 fn(bundle)
643 fn(bundle)
644
644
645 return run
645 return run
646
646
647 def makereadnbytes(size):
647 def makereadnbytes(size):
648 def run():
648 def run():
649 with open(bundlepath, b'rb') as fh:
649 with open(bundlepath, b'rb') as fh:
650 bundle = exchange.readbundle(ui, fh, bundlepath)
650 bundle = exchange.readbundle(ui, fh, bundlepath)
651 while bundle.read(size):
651 while bundle.read(size):
652 pass
652 pass
653
653
654 return run
654 return run
655
655
656 def makestdioread(size):
656 def makestdioread(size):
657 def run():
657 def run():
658 with open(bundlepath, b'rb') as fh:
658 with open(bundlepath, b'rb') as fh:
659 while fh.read(size):
659 while fh.read(size):
660 pass
660 pass
661
661
662 return run
662 return run
663
663
664 # bundle1
664 # bundle1
665
665
666 def deltaiter(bundle):
666 def deltaiter(bundle):
667 for delta in bundle.deltaiter():
667 for delta in bundle.deltaiter():
668 pass
668 pass
669
669
670 def iterchunks(bundle):
670 def iterchunks(bundle):
671 for chunk in bundle.getchunks():
671 for chunk in bundle.getchunks():
672 pass
672 pass
673
673
674 # bundle2
674 # bundle2
675
675
676 def forwardchunks(bundle):
676 def forwardchunks(bundle):
677 for chunk in bundle._forwardchunks():
677 for chunk in bundle._forwardchunks():
678 pass
678 pass
679
679
680 def iterparts(bundle):
680 def iterparts(bundle):
681 for part in bundle.iterparts():
681 for part in bundle.iterparts():
682 pass
682 pass
683
683
684 def iterpartsseekable(bundle):
684 def iterpartsseekable(bundle):
685 for part in bundle.iterparts(seekable=True):
685 for part in bundle.iterparts(seekable=True):
686 pass
686 pass
687
687
688 def seek(bundle):
688 def seek(bundle):
689 for part in bundle.iterparts(seekable=True):
689 for part in bundle.iterparts(seekable=True):
690 part.seek(0, os.SEEK_END)
690 part.seek(0, os.SEEK_END)
691
691
692 def makepartreadnbytes(size):
692 def makepartreadnbytes(size):
693 def run():
693 def run():
694 with open(bundlepath, b'rb') as fh:
694 with open(bundlepath, b'rb') as fh:
695 bundle = exchange.readbundle(ui, fh, bundlepath)
695 bundle = exchange.readbundle(ui, fh, bundlepath)
696 for part in bundle.iterparts():
696 for part in bundle.iterparts():
697 while part.read(size):
697 while part.read(size):
698 pass
698 pass
699
699
700 return run
700 return run
701
701
702 benches = [
702 benches = [
703 (makestdioread(8192), b'read(8k)'),
703 (makestdioread(8192), b'read(8k)'),
704 (makestdioread(16384), b'read(16k)'),
704 (makestdioread(16384), b'read(16k)'),
705 (makestdioread(32768), b'read(32k)'),
705 (makestdioread(32768), b'read(32k)'),
706 (makestdioread(131072), b'read(128k)'),
706 (makestdioread(131072), b'read(128k)'),
707 ]
707 ]
708
708
709 with open(bundlepath, b'rb') as fh:
709 with open(bundlepath, b'rb') as fh:
710 bundle = exchange.readbundle(ui, fh, bundlepath)
710 bundle = exchange.readbundle(ui, fh, bundlepath)
711
711
712 if isinstance(bundle, changegroup.cg1unpacker):
712 if isinstance(bundle, changegroup.cg1unpacker):
713 benches.extend([
713 benches.extend([
714 (makebench(deltaiter), b'cg1 deltaiter()'),
714 (makebench(deltaiter), b'cg1 deltaiter()'),
715 (makebench(iterchunks), b'cg1 getchunks()'),
715 (makebench(iterchunks), b'cg1 getchunks()'),
716 (makereadnbytes(8192), b'cg1 read(8k)'),
716 (makereadnbytes(8192), b'cg1 read(8k)'),
717 (makereadnbytes(16384), b'cg1 read(16k)'),
717 (makereadnbytes(16384), b'cg1 read(16k)'),
718 (makereadnbytes(32768), b'cg1 read(32k)'),
718 (makereadnbytes(32768), b'cg1 read(32k)'),
719 (makereadnbytes(131072), b'cg1 read(128k)'),
719 (makereadnbytes(131072), b'cg1 read(128k)'),
720 ])
720 ])
721 elif isinstance(bundle, bundle2.unbundle20):
721 elif isinstance(bundle, bundle2.unbundle20):
722 benches.extend([
722 benches.extend([
723 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
723 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
724 (makebench(iterparts), b'bundle2 iterparts()'),
724 (makebench(iterparts), b'bundle2 iterparts()'),
725 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
725 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
726 (makebench(seek), b'bundle2 part seek()'),
726 (makebench(seek), b'bundle2 part seek()'),
727 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
727 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
728 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
728 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
729 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
729 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
730 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
730 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
731 ])
731 ])
732 elif isinstance(bundle, streamclone.streamcloneapplier):
732 elif isinstance(bundle, streamclone.streamcloneapplier):
733 raise error.Abort(b'stream clone bundles not supported')
733 raise error.Abort(b'stream clone bundles not supported')
734 else:
734 else:
735 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
735 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
736
736
737 for fn, title in benches:
737 for fn, title in benches:
738 timer, fm = gettimer(ui, opts)
738 timer, fm = gettimer(ui, opts)
739 timer(fn, title=title)
739 timer(fn, title=title)
740 fm.end()
740 fm.end()
741
741
742 @command(b'perfchangegroupchangelog', formatteropts +
742 @command(b'perfchangegroupchangelog', formatteropts +
743 [(b'', b'cgversion', b'02', b'changegroup version'),
743 [(b'', b'cgversion', b'02', b'changegroup version'),
744 (b'r', b'rev', b'', b'revisions to add to changegroup')])
744 (b'r', b'rev', b'', b'revisions to add to changegroup')])
745 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
745 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
746 """Benchmark producing a changelog group for a changegroup.
746 """Benchmark producing a changelog group for a changegroup.
747
747
748 This measures the time spent processing the changelog during a
748 This measures the time spent processing the changelog during a
749 bundle operation. This occurs during `hg bundle` and on a server
749 bundle operation. This occurs during `hg bundle` and on a server
750 processing a `getbundle` wire protocol request (handles clones
750 processing a `getbundle` wire protocol request (handles clones
751 and pull requests).
751 and pull requests).
752
752
753 By default, all revisions are added to the changegroup.
753 By default, all revisions are added to the changegroup.
754 """
754 """
755 opts = _byteskwargs(opts)
755 opts = _byteskwargs(opts)
756 cl = repo.changelog
756 cl = repo.changelog
757 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
757 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
758 bundler = changegroup.getbundler(cgversion, repo)
758 bundler = changegroup.getbundler(cgversion, repo)
759
759
760 def d():
760 def d():
761 state, chunks = bundler._generatechangelog(cl, nodes)
761 state, chunks = bundler._generatechangelog(cl, nodes)
762 for chunk in chunks:
762 for chunk in chunks:
763 pass
763 pass
764
764
765 timer, fm = gettimer(ui, opts)
765 timer, fm = gettimer(ui, opts)
766
766
767 # Terminal printing can interfere with timing. So disable it.
767 # Terminal printing can interfere with timing. So disable it.
768 with ui.configoverride({(b'progress', b'disable'): True}):
768 with ui.configoverride({(b'progress', b'disable'): True}):
769 timer(d)
769 timer(d)
770
770
771 fm.end()
771 fm.end()
772
772
773 @command(b'perfdirs', formatteropts)
773 @command(b'perfdirs', formatteropts)
774 def perfdirs(ui, repo, **opts):
774 def perfdirs(ui, repo, **opts):
775 opts = _byteskwargs(opts)
775 opts = _byteskwargs(opts)
776 timer, fm = gettimer(ui, opts)
776 timer, fm = gettimer(ui, opts)
777 dirstate = repo.dirstate
777 dirstate = repo.dirstate
778 b'a' in dirstate
778 b'a' in dirstate
779 def d():
779 def d():
780 dirstate.hasdir(b'a')
780 dirstate.hasdir(b'a')
781 del dirstate._map._dirs
781 del dirstate._map._dirs
782 timer(d)
782 timer(d)
783 fm.end()
783 fm.end()
784
784
785 @command(b'perfdirstate', formatteropts)
785 @command(b'perfdirstate', formatteropts)
786 def perfdirstate(ui, repo, **opts):
786 def perfdirstate(ui, repo, **opts):
787 opts = _byteskwargs(opts)
787 opts = _byteskwargs(opts)
788 timer, fm = gettimer(ui, opts)
788 timer, fm = gettimer(ui, opts)
789 b"a" in repo.dirstate
789 b"a" in repo.dirstate
790 def d():
790 def d():
791 repo.dirstate.invalidate()
791 repo.dirstate.invalidate()
792 b"a" in repo.dirstate
792 b"a" in repo.dirstate
793 timer(d)
793 timer(d)
794 fm.end()
794 fm.end()
795
795
796 @command(b'perfdirstatedirs', formatteropts)
796 @command(b'perfdirstatedirs', formatteropts)
797 def perfdirstatedirs(ui, repo, **opts):
797 def perfdirstatedirs(ui, repo, **opts):
798 opts = _byteskwargs(opts)
798 opts = _byteskwargs(opts)
799 timer, fm = gettimer(ui, opts)
799 timer, fm = gettimer(ui, opts)
800 b"a" in repo.dirstate
800 b"a" in repo.dirstate
801 def d():
801 def d():
802 repo.dirstate.hasdir(b"a")
802 repo.dirstate.hasdir(b"a")
803 del repo.dirstate._map._dirs
803 del repo.dirstate._map._dirs
804 timer(d)
804 timer(d)
805 fm.end()
805 fm.end()
806
806
807 @command(b'perfdirstatefoldmap', formatteropts)
807 @command(b'perfdirstatefoldmap', formatteropts)
808 def perfdirstatefoldmap(ui, repo, **opts):
808 def perfdirstatefoldmap(ui, repo, **opts):
809 opts = _byteskwargs(opts)
809 opts = _byteskwargs(opts)
810 timer, fm = gettimer(ui, opts)
810 timer, fm = gettimer(ui, opts)
811 dirstate = repo.dirstate
811 dirstate = repo.dirstate
812 b'a' in dirstate
812 b'a' in dirstate
813 def d():
813 def d():
814 dirstate._map.filefoldmap.get(b'a')
814 dirstate._map.filefoldmap.get(b'a')
815 del dirstate._map.filefoldmap
815 del dirstate._map.filefoldmap
816 timer(d)
816 timer(d)
817 fm.end()
817 fm.end()
818
818
819 @command(b'perfdirfoldmap', formatteropts)
819 @command(b'perfdirfoldmap', formatteropts)
820 def perfdirfoldmap(ui, repo, **opts):
820 def perfdirfoldmap(ui, repo, **opts):
821 opts = _byteskwargs(opts)
821 opts = _byteskwargs(opts)
822 timer, fm = gettimer(ui, opts)
822 timer, fm = gettimer(ui, opts)
823 dirstate = repo.dirstate
823 dirstate = repo.dirstate
824 b'a' in dirstate
824 b'a' in dirstate
825 def d():
825 def d():
826 dirstate._map.dirfoldmap.get(b'a')
826 dirstate._map.dirfoldmap.get(b'a')
827 del dirstate._map.dirfoldmap
827 del dirstate._map.dirfoldmap
828 del dirstate._map._dirs
828 del dirstate._map._dirs
829 timer(d)
829 timer(d)
830 fm.end()
830 fm.end()
831
831
832 @command(b'perfdirstatewrite', formatteropts)
832 @command(b'perfdirstatewrite', formatteropts)
833 def perfdirstatewrite(ui, repo, **opts):
833 def perfdirstatewrite(ui, repo, **opts):
834 opts = _byteskwargs(opts)
834 opts = _byteskwargs(opts)
835 timer, fm = gettimer(ui, opts)
835 timer, fm = gettimer(ui, opts)
836 ds = repo.dirstate
836 ds = repo.dirstate
837 b"a" in ds
837 b"a" in ds
838 def d():
838 def d():
839 ds._dirty = True
839 ds._dirty = True
840 ds.write(repo.currenttransaction())
840 ds.write(repo.currenttransaction())
841 timer(d)
841 timer(d)
842 fm.end()
842 fm.end()
843
843
844 @command(b'perfmergecalculate',
844 @command(b'perfmergecalculate',
845 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
845 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
846 def perfmergecalculate(ui, repo, rev, **opts):
846 def perfmergecalculate(ui, repo, rev, **opts):
847 opts = _byteskwargs(opts)
847 opts = _byteskwargs(opts)
848 timer, fm = gettimer(ui, opts)
848 timer, fm = gettimer(ui, opts)
849 wctx = repo[None]
849 wctx = repo[None]
850 rctx = scmutil.revsingle(repo, rev, rev)
850 rctx = scmutil.revsingle(repo, rev, rev)
851 ancestor = wctx.ancestor(rctx)
851 ancestor = wctx.ancestor(rctx)
852 # we don't want working dir files to be stat'd in the benchmark, so prime
852 # we don't want working dir files to be stat'd in the benchmark, so prime
853 # that cache
853 # that cache
854 wctx.dirty()
854 wctx.dirty()
855 def d():
855 def d():
856 # acceptremote is True because we don't want prompts in the middle of
856 # acceptremote is True because we don't want prompts in the middle of
857 # our benchmark
857 # our benchmark
858 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
858 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
859 acceptremote=True, followcopies=True)
859 acceptremote=True, followcopies=True)
860 timer(d)
860 timer(d)
861 fm.end()
861 fm.end()
862
862
863 @command(b'perfpathcopies', [], b"REV REV")
863 @command(b'perfpathcopies', [], b"REV REV")
864 def perfpathcopies(ui, repo, rev1, rev2, **opts):
864 def perfpathcopies(ui, repo, rev1, rev2, **opts):
865 """benchmark the copy tracing logic"""
865 """benchmark the copy tracing logic"""
866 opts = _byteskwargs(opts)
866 opts = _byteskwargs(opts)
867 timer, fm = gettimer(ui, opts)
867 timer, fm = gettimer(ui, opts)
868 ctx1 = scmutil.revsingle(repo, rev1, rev1)
868 ctx1 = scmutil.revsingle(repo, rev1, rev1)
869 ctx2 = scmutil.revsingle(repo, rev2, rev2)
869 ctx2 = scmutil.revsingle(repo, rev2, rev2)
870 def d():
870 def d():
871 copies.pathcopies(ctx1, ctx2)
871 copies.pathcopies(ctx1, ctx2)
872 timer(d)
872 timer(d)
873 fm.end()
873 fm.end()
874
874
875 @command(b'perfphases',
875 @command(b'perfphases',
876 [(b'', b'full', False, b'include file reading time too'),
876 [(b'', b'full', False, b'include file reading time too'),
877 ], b"")
877 ], b"")
878 def perfphases(ui, repo, **opts):
878 def perfphases(ui, repo, **opts):
879 """benchmark phasesets computation"""
879 """benchmark phasesets computation"""
880 opts = _byteskwargs(opts)
880 opts = _byteskwargs(opts)
881 timer, fm = gettimer(ui, opts)
881 timer, fm = gettimer(ui, opts)
882 _phases = repo._phasecache
882 _phases = repo._phasecache
883 full = opts.get(b'full')
883 full = opts.get(b'full')
884 def d():
884 def d():
885 phases = _phases
885 phases = _phases
886 if full:
886 if full:
887 clearfilecache(repo, b'_phasecache')
887 clearfilecache(repo, b'_phasecache')
888 phases = repo._phasecache
888 phases = repo._phasecache
889 phases.invalidate()
889 phases.invalidate()
890 phases.loadphaserevs(repo)
890 phases.loadphaserevs(repo)
891 timer(d)
891 timer(d)
892 fm.end()
892 fm.end()
893
893
894 @command(b'perfphasesremote',
894 @command(b'perfphasesremote',
895 [], b"[DEST]")
895 [], b"[DEST]")
896 def perfphasesremote(ui, repo, dest=None, **opts):
896 def perfphasesremote(ui, repo, dest=None, **opts):
897 """benchmark time needed to analyse phases of the remote server"""
897 """benchmark time needed to analyse phases of the remote server"""
898 from mercurial.node import (
898 from mercurial.node import (
899 bin,
899 bin,
900 )
900 )
901 from mercurial import (
901 from mercurial import (
902 exchange,
902 exchange,
903 hg,
903 hg,
904 phases,
904 phases,
905 )
905 )
906 opts = _byteskwargs(opts)
906 opts = _byteskwargs(opts)
907 timer, fm = gettimer(ui, opts)
907 timer, fm = gettimer(ui, opts)
908
908
909 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
909 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
910 if not path:
910 if not path:
911 raise error.Abort((b'default repository not configured!'),
911 raise error.Abort((b'default repository not configured!'),
912 hint=(b"see 'hg help config.paths'"))
912 hint=(b"see 'hg help config.paths'"))
913 dest = path.pushloc or path.loc
913 dest = path.pushloc or path.loc
914 branches = (path.branch, opts.get(b'branch') or [])
914 branches = (path.branch, opts.get(b'branch') or [])
915 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
915 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
916 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
916 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
917 other = hg.peer(repo, opts, dest)
917 other = hg.peer(repo, opts, dest)
918
918
919 # easier to perform discovery through the operation
919 # easier to perform discovery through the operation
920 op = exchange.pushoperation(repo, other)
920 op = exchange.pushoperation(repo, other)
921 exchange._pushdiscoverychangeset(op)
921 exchange._pushdiscoverychangeset(op)
922
922
923 remotesubset = op.fallbackheads
923 remotesubset = op.fallbackheads
924
924
925 with other.commandexecutor() as e:
925 with other.commandexecutor() as e:
926 remotephases = e.callcommand(b'listkeys',
926 remotephases = e.callcommand(b'listkeys',
927 {b'namespace': b'phases'}).result()
927 {b'namespace': b'phases'}).result()
928 del other
928 del other
929 publishing = remotephases.get(b'publishing', False)
929 publishing = remotephases.get(b'publishing', False)
930 if publishing:
930 if publishing:
931 ui.status((b'publishing: yes\n'))
931 ui.status((b'publishing: yes\n'))
932 else:
932 else:
933 ui.status((b'publishing: no\n'))
933 ui.status((b'publishing: no\n'))
934
934
935 nodemap = repo.changelog.nodemap
935 nodemap = repo.changelog.nodemap
936 nonpublishroots = 0
936 nonpublishroots = 0
937 for nhex, phase in remotephases.iteritems():
937 for nhex, phase in remotephases.iteritems():
938 if nhex == b'publishing': # ignore data related to publish option
938 if nhex == b'publishing': # ignore data related to publish option
939 continue
939 continue
940 node = bin(nhex)
940 node = bin(nhex)
941 if node in nodemap and int(phase):
941 if node in nodemap and int(phase):
942 nonpublishroots += 1
942 nonpublishroots += 1
943 ui.status((b'number of roots: %d\n') % len(remotephases))
943 ui.status((b'number of roots: %d\n') % len(remotephases))
944 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
944 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
945 def d():
945 def d():
946 phases.remotephasessummary(repo,
946 phases.remotephasessummary(repo,
947 remotesubset,
947 remotesubset,
948 remotephases)
948 remotephases)
949 timer(d)
949 timer(d)
950 fm.end()
950 fm.end()
951
951
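# Usage sketch (illustrative; assumes a configured `default`/`default-push`
# path when DEST is omitted):
#
#   $ hg perfphasesremote
#   $ hg perfphasesremote DEST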
952 @command(b'perfmanifest',[
952 @command(b'perfmanifest',[
953 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
953 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
954 (b'', b'clear-disk', False, b'clear on-disk caches too'),
954 (b'', b'clear-disk', False, b'clear on-disk caches too'),
955 ] + formatteropts, b'REV|NODE')
955 ] + formatteropts, b'REV|NODE')
956 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
956 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
957 """benchmark the time to read a manifest from disk and return a usable
957 """benchmark the time to read a manifest from disk and return a usable
958 dict-like object
958 dict-like object
959
959
960 Manifest caches are cleared before retrieval."""
960 Manifest caches are cleared before retrieval."""
961 opts = _byteskwargs(opts)
961 opts = _byteskwargs(opts)
962 timer, fm = gettimer(ui, opts)
962 timer, fm = gettimer(ui, opts)
963 if not manifest_rev:
963 if not manifest_rev:
964 ctx = scmutil.revsingle(repo, rev, rev)
964 ctx = scmutil.revsingle(repo, rev, rev)
965 t = ctx.manifestnode()
965 t = ctx.manifestnode()
966 else:
966 else:
967 from mercurial.node import bin
967 from mercurial.node import bin
968
968
969 if len(rev) == 40:
969 if len(rev) == 40:
970 t = bin(rev)
970 t = bin(rev)
971 else:
971 else:
972 try:
972 try:
973 rev = int(rev)
973 rev = int(rev)
974
974
975 if util.safehasattr(repo.manifestlog, b'getstorage'):
975 if util.safehasattr(repo.manifestlog, b'getstorage'):
976 t = repo.manifestlog.getstorage(b'').node(rev)
976 t = repo.manifestlog.getstorage(b'').node(rev)
977 else:
977 else:
978 t = repo.manifestlog._revlog.lookup(rev)
978 t = repo.manifestlog._revlog.lookup(rev)
979 except ValueError:
979 except ValueError:
980 raise error.Abort(b'manifest revision must be integer or full '
980 raise error.Abort(b'manifest revision must be integer or full '
981 b'node')
981 b'node')
982 def d():
982 def d():
983 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
983 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
984 repo.manifestlog[t].read()
984 repo.manifestlog[t].read()
985 timer(d)
985 timer(d)
986 fm.end()
986 fm.end()
987
987
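# Usage sketch (illustrative; the revision numbers are placeholders): the
# argument is a changeset revision by default, or a manifest revision/node
# when `-m` is given; `--clear-disk` also drops on-disk manifest caches.
#
#   $ hg perfmanifest tip
#   $ hg perfmanifest -m 0 --clear-disk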
988 @command(b'perfchangeset', formatteropts)
988 @command(b'perfchangeset', formatteropts)
989 def perfchangeset(ui, repo, rev, **opts):
989 def perfchangeset(ui, repo, rev, **opts):
990 opts = _byteskwargs(opts)
990 opts = _byteskwargs(opts)
991 timer, fm = gettimer(ui, opts)
991 timer, fm = gettimer(ui, opts)
992 n = scmutil.revsingle(repo, rev).node()
992 n = scmutil.revsingle(repo, rev).node()
993 def d():
993 def d():
994 repo.changelog.read(n)
994 repo.changelog.read(n)
995 #repo.changelog._cache = None
995 #repo.changelog._cache = None
996 timer(d)
996 timer(d)
997 fm.end()
997 fm.end()
998
998
999 @command(b'perfignore', formatteropts)
999 @command(b'perfignore', formatteropts)
1000 def perfignore(ui, repo, **opts):
1000 def perfignore(ui, repo, **opts):
1001 """benchmark operation related to computing ignore"""
1001 """benchmark operation related to computing ignore"""
1002 opts = _byteskwargs(opts)
1002 opts = _byteskwargs(opts)
1003 timer, fm = gettimer(ui, opts)
1003 timer, fm = gettimer(ui, opts)
1004 dirstate = repo.dirstate
1004 dirstate = repo.dirstate
1005
1005
1006 def setupone():
1006 def setupone():
1007 dirstate.invalidate()
1007 dirstate.invalidate()
1008 clearfilecache(dirstate, b'_ignore')
1008 clearfilecache(dirstate, b'_ignore')
1009
1009
1010 def runone():
1010 def runone():
1011 dirstate._ignore
1011 dirstate._ignore
1012
1012
1013 timer(runone, setup=setupone, title=b"load")
1013 timer(runone, setup=setupone, title=b"load")
1014 fm.end()
1014 fm.end()
1015
1015
1016 @command(b'perfindex', [
1016 @command(b'perfindex', [
1017 (b'', b'rev', b'', b'revision to be looked up (default tip)'),
1017 (b'', b'rev', b'', b'revision to be looked up (default tip)'),
1018 ] + formatteropts)
1018 ] + formatteropts)
1019 def perfindex(ui, repo, **opts):
1019 def perfindex(ui, repo, **opts):
1020 import mercurial.revlog
1020 import mercurial.revlog
1021 opts = _byteskwargs(opts)
1021 opts = _byteskwargs(opts)
1022 timer, fm = gettimer(ui, opts)
1022 timer, fm = gettimer(ui, opts)
1023 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1023 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1024 if opts[b'rev'] is None:
1024 if opts[b'rev'] is None:
1025 n = repo[b"tip"].node()
1025 n = repo[b"tip"].node()
1026 else:
1026 else:
1027 rev = scmutil.revsingle(repo, opts[b'rev'])
1027 rev = scmutil.revsingle(repo, opts[b'rev'])
1028 n = repo[rev].node()
1028 n = repo[rev].node()
1029
1029
1030 unfi = repo.unfiltered()
1030 unfi = repo.unfiltered()
1031 # find the filecache func directly
1031 # find the filecache func directly
1032 # This avoids polluting the benchmark with the filecache logic
1032 # This avoids polluting the benchmark with the filecache logic
1033 makecl = unfi.__class__.changelog.func
1033 makecl = unfi.__class__.changelog.func
1034 def setup():
1034 def setup():
1035 # probably not necessary, but for good measure
1035 # probably not necessary, but for good measure
1036 clearchangelog(unfi)
1036 clearchangelog(unfi)
1037 def d():
1037 def d():
1038 cl = makecl(unfi)
1038 cl = makecl(unfi)
1039 cl.rev(n)
1039 cl.rev(n)
1040 timer(d, setup=setup)
1040 timer(d, setup=setup)
1041 fm.end()
1041 fm.end()
1042
1042
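# Usage sketch (illustrative): looks up the tip node in a freshly loaded
# changelog index, or the node of `--rev` if provided.
#
#   $ hg perfindex
#   $ hg perfindex --rev 0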
1043 @command(b'perfstartup', formatteropts)
1043 @command(b'perfstartup', formatteropts)
1044 def perfstartup(ui, repo, **opts):
1044 def perfstartup(ui, repo, **opts):
1045 opts = _byteskwargs(opts)
1045 opts = _byteskwargs(opts)
1046 timer, fm = gettimer(ui, opts)
1046 timer, fm = gettimer(ui, opts)
1047 def d():
1047 def d():
1048 if os.name != r'nt':
1048 if os.name != r'nt':
1049 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1049 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1050 fsencode(sys.argv[0]))
1050 fsencode(sys.argv[0]))
1051 else:
1051 else:
1052 os.environ[r'HGRCPATH'] = r' '
1052 os.environ[r'HGRCPATH'] = r' '
1053 os.system(r"%s version -q > NUL" % sys.argv[0])
1053 os.system(r"%s version -q > NUL" % sys.argv[0])
1054 timer(d)
1054 timer(d)
1055 fm.end()
1055 fm.end()
1056
1056
1057 @command(b'perfparents', formatteropts)
1057 @command(b'perfparents', formatteropts)
1058 def perfparents(ui, repo, **opts):
1058 def perfparents(ui, repo, **opts):
1059 opts = _byteskwargs(opts)
1059 opts = _byteskwargs(opts)
1060 timer, fm = gettimer(ui, opts)
1060 timer, fm = gettimer(ui, opts)
1061 # control the number of commits perfparents iterates over
1061 # control the number of commits perfparents iterates over
1062 # experimental config: perf.parentscount
1062 # experimental config: perf.parentscount
1063 count = getint(ui, b"perf", b"parentscount", 1000)
1063 count = getint(ui, b"perf", b"parentscount", 1000)
1064 if len(repo.changelog) < count:
1064 if len(repo.changelog) < count:
1065 raise error.Abort(b"repo needs %d commits for this test" % count)
1065 raise error.Abort(b"repo needs %d commits for this test" % count)
1066 repo = repo.unfiltered()
1066 repo = repo.unfiltered()
1067 nl = [repo.changelog.node(i) for i in _xrange(count)]
1067 nl = [repo.changelog.node(i) for i in _xrange(count)]
1068 def d():
1068 def d():
1069 for n in nl:
1069 for n in nl:
1070 repo.changelog.parents(n)
1070 repo.changelog.parents(n)
1071 timer(d)
1071 timer(d)
1072 fm.end()
1072 fm.end()
1073
1073
1074 @command(b'perfctxfiles', formatteropts)
1074 @command(b'perfctxfiles', formatteropts)
1075 def perfctxfiles(ui, repo, x, **opts):
1075 def perfctxfiles(ui, repo, x, **opts):
1076 opts = _byteskwargs(opts)
1076 opts = _byteskwargs(opts)
1077 x = int(x)
1077 x = int(x)
1078 timer, fm = gettimer(ui, opts)
1078 timer, fm = gettimer(ui, opts)
1079 def d():
1079 def d():
1080 len(repo[x].files())
1080 len(repo[x].files())
1081 timer(d)
1081 timer(d)
1082 fm.end()
1082 fm.end()
1083
1083
1084 @command(b'perfrawfiles', formatteropts)
1084 @command(b'perfrawfiles', formatteropts)
1085 def perfrawfiles(ui, repo, x, **opts):
1085 def perfrawfiles(ui, repo, x, **opts):
1086 opts = _byteskwargs(opts)
1086 opts = _byteskwargs(opts)
1087 x = int(x)
1087 x = int(x)
1088 timer, fm = gettimer(ui, opts)
1088 timer, fm = gettimer(ui, opts)
1089 cl = repo.changelog
1089 cl = repo.changelog
1090 def d():
1090 def d():
1091 len(cl.read(x)[3])
1091 len(cl.read(x)[3])
1092 timer(d)
1092 timer(d)
1093 fm.end()
1093 fm.end()
1094
1094
1095 @command(b'perflookup', formatteropts)
1095 @command(b'perflookup', formatteropts)
1096 def perflookup(ui, repo, rev, **opts):
1096 def perflookup(ui, repo, rev, **opts):
1097 opts = _byteskwargs(opts)
1097 opts = _byteskwargs(opts)
1098 timer, fm = gettimer(ui, opts)
1098 timer, fm = gettimer(ui, opts)
1099 timer(lambda: len(repo.lookup(rev)))
1099 timer(lambda: len(repo.lookup(rev)))
1100 fm.end()
1100 fm.end()
1101
1101
1102 @command(b'perflinelogedits',
1102 @command(b'perflinelogedits',
1103 [(b'n', b'edits', 10000, b'number of edits'),
1103 [(b'n', b'edits', 10000, b'number of edits'),
1104 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1104 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1105 ], norepo=True)
1105 ], norepo=True)
1106 def perflinelogedits(ui, **opts):
1106 def perflinelogedits(ui, **opts):
1107 from mercurial import linelog
1107 from mercurial import linelog
1108
1108
1109 opts = _byteskwargs(opts)
1109 opts = _byteskwargs(opts)
1110
1110
1111 edits = opts[b'edits']
1111 edits = opts[b'edits']
1112 maxhunklines = opts[b'max_hunk_lines']
1112 maxhunklines = opts[b'max_hunk_lines']
1113
1113
1114 maxb1 = 100000
1114 maxb1 = 100000
1115 random.seed(0)
1115 random.seed(0)
1116 randint = random.randint
1116 randint = random.randint
1117 currentlines = 0
1117 currentlines = 0
1118 arglist = []
1118 arglist = []
1119 for rev in _xrange(edits):
1119 for rev in _xrange(edits):
1120 a1 = randint(0, currentlines)
1120 a1 = randint(0, currentlines)
1121 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1121 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1122 b1 = randint(0, maxb1)
1122 b1 = randint(0, maxb1)
1123 b2 = randint(b1, b1 + maxhunklines)
1123 b2 = randint(b1, b1 + maxhunklines)
1124 currentlines += (b2 - b1) - (a2 - a1)
1124 currentlines += (b2 - b1) - (a2 - a1)
1125 arglist.append((rev, a1, a2, b1, b2))
1125 arglist.append((rev, a1, a2, b1, b2))
1126
1126
1127 def d():
1127 def d():
1128 ll = linelog.linelog()
1128 ll = linelog.linelog()
1129 for args in arglist:
1129 for args in arglist:
1130 ll.replacelines(*args)
1130 ll.replacelines(*args)
1131
1131
1132 timer, fm = gettimer(ui, opts)
1132 timer, fm = gettimer(ui, opts)
1133 timer(d)
1133 timer(d)
1134 fm.end()
1134 fm.end()
1135
1135
1136 @command(b'perfrevrange', formatteropts)
1136 @command(b'perfrevrange', formatteropts)
1137 def perfrevrange(ui, repo, *specs, **opts):
1137 def perfrevrange(ui, repo, *specs, **opts):
1138 opts = _byteskwargs(opts)
1138 opts = _byteskwargs(opts)
1139 timer, fm = gettimer(ui, opts)
1139 timer, fm = gettimer(ui, opts)
1140 revrange = scmutil.revrange
1140 revrange = scmutil.revrange
1141 timer(lambda: len(revrange(repo, specs)))
1141 timer(lambda: len(revrange(repo, specs)))
1142 fm.end()
1142 fm.end()
1143
1143
1144 @command(b'perfnodelookup', formatteropts)
1144 @command(b'perfnodelookup', formatteropts)
1145 def perfnodelookup(ui, repo, rev, **opts):
1145 def perfnodelookup(ui, repo, rev, **opts):
1146 opts = _byteskwargs(opts)
1146 opts = _byteskwargs(opts)
1147 timer, fm = gettimer(ui, opts)
1147 timer, fm = gettimer(ui, opts)
1148 import mercurial.revlog
1148 import mercurial.revlog
1149 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1149 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1150 n = scmutil.revsingle(repo, rev).node()
1150 n = scmutil.revsingle(repo, rev).node()
1151 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1151 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1152 def d():
1152 def d():
1153 cl.rev(n)
1153 cl.rev(n)
1154 clearcaches(cl)
1154 clearcaches(cl)
1155 timer(d)
1155 timer(d)
1156 fm.end()
1156 fm.end()
1157
1157
1158 @command(b'perflog',
1158 @command(b'perflog',
1159 [(b'', b'rename', False, b'ask log to follow renames')
1159 [(b'', b'rename', False, b'ask log to follow renames')
1160 ] + formatteropts)
1160 ] + formatteropts)
1161 def perflog(ui, repo, rev=None, **opts):
1161 def perflog(ui, repo, rev=None, **opts):
1162 opts = _byteskwargs(opts)
1162 opts = _byteskwargs(opts)
1163 if rev is None:
1163 if rev is None:
1164 rev=[]
1164 rev=[]
1165 timer, fm = gettimer(ui, opts)
1165 timer, fm = gettimer(ui, opts)
1166 ui.pushbuffer()
1166 ui.pushbuffer()
1167 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1167 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1168 copies=opts.get(b'rename')))
1168 copies=opts.get(b'rename')))
1169 ui.popbuffer()
1169 ui.popbuffer()
1170 fm.end()
1170 fm.end()
1171
1171
1172 @command(b'perfmoonwalk', formatteropts)
1172 @command(b'perfmoonwalk', formatteropts)
1173 def perfmoonwalk(ui, repo, **opts):
1173 def perfmoonwalk(ui, repo, **opts):
1174 """benchmark walking the changelog backwards
1174 """benchmark walking the changelog backwards
1175
1175
1176 This also loads the changelog data for each revision in the changelog.
1176 This also loads the changelog data for each revision in the changelog.
1177 """
1177 """
1178 opts = _byteskwargs(opts)
1178 opts = _byteskwargs(opts)
1179 timer, fm = gettimer(ui, opts)
1179 timer, fm = gettimer(ui, opts)
1180 def moonwalk():
1180 def moonwalk():
1181 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1181 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1182 ctx = repo[i]
1182 ctx = repo[i]
1183 ctx.branch() # read changelog data (in addition to the index)
1183 ctx.branch() # read changelog data (in addition to the index)
1184 timer(moonwalk)
1184 timer(moonwalk)
1185 fm.end()
1185 fm.end()
1186
1186
1187 @command(b'perftemplating',
1187 @command(b'perftemplating',
1188 [(b'r', b'rev', [], b'revisions to run the template on'),
1188 [(b'r', b'rev', [], b'revisions to run the template on'),
1189 ] + formatteropts)
1189 ] + formatteropts)
1190 def perftemplating(ui, repo, testedtemplate=None, **opts):
1190 def perftemplating(ui, repo, testedtemplate=None, **opts):
1191 """test the rendering time of a given template"""
1191 """test the rendering time of a given template"""
1192 if makelogtemplater is None:
1192 if makelogtemplater is None:
1193 raise error.Abort((b"perftemplating not available with this Mercurial"),
1193 raise error.Abort((b"perftemplating not available with this Mercurial"),
1194 hint=b"use 4.3 or later")
1194 hint=b"use 4.3 or later")
1195
1195
1196 opts = _byteskwargs(opts)
1196 opts = _byteskwargs(opts)
1197
1197
1198 nullui = ui.copy()
1198 nullui = ui.copy()
1199 nullui.fout = open(os.devnull, r'wb')
1199 nullui.fout = open(os.devnull, r'wb')
1200 nullui.disablepager()
1200 nullui.disablepager()
1201 revs = opts.get(b'rev')
1201 revs = opts.get(b'rev')
1202 if not revs:
1202 if not revs:
1203 revs = [b'all()']
1203 revs = [b'all()']
1204 revs = list(scmutil.revrange(repo, revs))
1204 revs = list(scmutil.revrange(repo, revs))
1205
1205
1206 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1206 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1207 b' {author|person}: {desc|firstline}\n')
1207 b' {author|person}: {desc|firstline}\n')
1208 if testedtemplate is None:
1208 if testedtemplate is None:
1209 testedtemplate = defaulttemplate
1209 testedtemplate = defaulttemplate
1210 displayer = makelogtemplater(nullui, repo, testedtemplate)
1210 displayer = makelogtemplater(nullui, repo, testedtemplate)
1211 def format():
1211 def format():
1212 for r in revs:
1212 for r in revs:
1213 ctx = repo[r]
1213 ctx = repo[r]
1214 displayer.show(ctx)
1214 displayer.show(ctx)
1215 displayer.flush(ctx)
1215 displayer.flush(ctx)
1216
1216
1217 timer, fm = gettimer(ui, opts)
1217 timer, fm = gettimer(ui, opts)
1218 timer(format)
1218 timer(format)
1219 fm.end()
1219 fm.end()
1220
1220
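# Usage sketch (illustrative; the revset is a placeholder): renders a
# log-style template over `all()` unless `-r` narrows the revision set.
#
#   $ hg perftemplating
#   $ hg perftemplating -r 'last(all(), 1000)'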
1221 @command(b'perfhelper-pathcopies', formatteropts +
1221 @command(b'perfhelper-pathcopies', formatteropts +
1222 [
1222 [
1223 (b'r', b'revs', [], b'restrict search to these revisions'),
1223 (b'r', b'revs', [], b'restrict search to these revisions'),
1224 (b'', b'timing', False, b'provides extra data (costly)'),
1224 (b'', b'timing', False, b'provides extra data (costly)'),
1225 ])
1225 ])
1226 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1226 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1227 """find statistics about potential parameters for the `perftracecopies` command
1227 """find statistics about potential parameters for the `perftracecopies` command
1228
1228
1229 This command finds source-destination pairs relevant for copy tracing testing.
1229 This command finds source-destination pairs relevant for copy tracing testing.
1230 It reports values for some of the parameters that impact copy tracing time.
1230 It reports values for some of the parameters that impact copy tracing time.
1231
1231
1232 If `--timing` is set, rename detection is run and the associated timing
1232 If `--timing` is set, rename detection is run and the associated timing
1233 will be reported. The extra details come at the cost of a slower command
1233 will be reported. The extra details come at the cost of a slower command
1234 execution.
1234 execution.
1235
1235
1236 Since the rename detection is only run once, other factors might easily
1236 Since the rename detection is only run once, other factors might easily
1237 affect the precision of the timing. However, it should give a good
1237 affect the precision of the timing. However, it should give a good
1238 approximation of which revision pairs are very costly.
1238 approximation of which revision pairs are very costly.
1239 """
1239 """
1240 opts = _byteskwargs(opts)
1240 opts = _byteskwargs(opts)
1241 fm = ui.formatter(b'perf', opts)
1241 fm = ui.formatter(b'perf', opts)
1242 dotiming = opts[b'timing']
1242 dotiming = opts[b'timing']
1243
1243
1244 if dotiming:
1244 if dotiming:
1245 header = '%12s %12s %12s %12s %12s %12s\n'
1245 header = '%12s %12s %12s %12s %12s %12s\n'
1246 output = ("%(source)12s %(destination)12s "
1246 output = ("%(source)12s %(destination)12s "
1247 "%(nbrevs)12d %(nbmissingfiles)12d "
1247 "%(nbrevs)12d %(nbmissingfiles)12d "
1248 "%(nbrenamedfiles)12d %(time)18.5f\n")
1248 "%(nbrenamedfiles)12d %(time)18.5f\n")
1249 header_names = ("source", "destination", "nb-revs", "nb-files",
1249 header_names = ("source", "destination", "nb-revs", "nb-files",
1250 "nb-renames", "time")
1250 "nb-renames", "time")
1251 fm.plain(header % header_names)
1251 fm.plain(header % header_names)
1252 else:
1252 else:
1253 header = '%12s %12s %12s %12s\n'
1253 header = '%12s %12s %12s %12s\n'
1254 output = ("%(source)12s %(destination)12s "
1254 output = ("%(source)12s %(destination)12s "
1255 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1255 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1256 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1256 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1257
1257
1258 if not revs:
1258 if not revs:
1259 revs = ['all()']
1259 revs = ['all()']
1260 revs = scmutil.revrange(repo, revs)
1260 revs = scmutil.revrange(repo, revs)
1261
1261
1262 roi = repo.revs('merge() and %ld', revs)
1262 roi = repo.revs('merge() and %ld', revs)
1263 for r in roi:
1263 for r in roi:
1264 ctx = repo[r]
1264 ctx = repo[r]
1265 p1 = ctx.p1().rev()
1265 p1 = ctx.p1().rev()
1266 p2 = ctx.p2().rev()
1266 p2 = ctx.p2().rev()
1267 bases = repo.changelog._commonancestorsheads(p1, p2)
1267 bases = repo.changelog._commonancestorsheads(p1, p2)
1268 for p in (p1, p2):
1268 for p in (p1, p2):
1269 for b in bases:
1269 for b in bases:
1270 base = repo[b]
1270 base = repo[b]
1271 parent = repo[p]
1271 parent = repo[p]
1272 missing = copies._computeforwardmissing(base, parent)
1272 missing = copies._computeforwardmissing(base, parent)
1273 if not missing:
1273 if not missing:
1274 continue
1274 continue
1275 data = {
1275 data = {
1276 b'source': base.hex(),
1276 b'source': base.hex(),
1277 b'destination': parent.hex(),
1277 b'destination': parent.hex(),
1278 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1278 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1279 b'nbmissingfiles': len(missing),
1279 b'nbmissingfiles': len(missing),
1280 }
1280 }
1281 if dotiming:
1281 if dotiming:
1282 begin = util.timer()
1282 begin = util.timer()
1283 renames = copies.pathcopies(base, parent)
1283 renames = copies.pathcopies(base, parent)
1284 end = util.timer()
1284 end = util.timer()
1285 # not very stable timing since we did only one run
1285 # not very stable timing since we did only one run
1286 data['time'] = end - begin
1286 data['time'] = end - begin
1287 data['nbrenamedfiles'] = len(renames)
1287 data['nbrenamedfiles'] = len(renames)
1288 fm.startitem()
1288 fm.startitem()
1289 fm.data(**data)
1289 fm.data(**data)
1290 out = data.copy()
1290 out = data.copy()
1291 out['source'] = fm.hexfunc(base.node())
1291 out['source'] = fm.hexfunc(base.node())
1292 out['destination'] = fm.hexfunc(parent.node())
1292 out['destination'] = fm.hexfunc(parent.node())
1293 fm.plain(output % out)
1293 fm.plain(output % out)
1294
1294
1295 fm.end()
1295 fm.end()
1296
1296
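# Usage sketch (illustrative; the revset is a placeholder): scans merge
# revisions, optionally restricted with `-r`; `--timing` additionally runs
# the rename detection itself, which is slower.
#
#   $ hg perfhelper-pathcopies -r 'last(all(), 1000)'
#   $ hg perfhelper-pathcopies --timing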
1297 @command(b'perfcca', formatteropts)
1297 @command(b'perfcca', formatteropts)
1298 def perfcca(ui, repo, **opts):
1298 def perfcca(ui, repo, **opts):
1299 opts = _byteskwargs(opts)
1299 opts = _byteskwargs(opts)
1300 timer, fm = gettimer(ui, opts)
1300 timer, fm = gettimer(ui, opts)
1301 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1301 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1302 fm.end()
1302 fm.end()
1303
1303
1304 @command(b'perffncacheload', formatteropts)
1304 @command(b'perffncacheload', formatteropts)
1305 def perffncacheload(ui, repo, **opts):
1305 def perffncacheload(ui, repo, **opts):
1306 opts = _byteskwargs(opts)
1306 opts = _byteskwargs(opts)
1307 timer, fm = gettimer(ui, opts)
1307 timer, fm = gettimer(ui, opts)
1308 s = repo.store
1308 s = repo.store
1309 def d():
1309 def d():
1310 s.fncache._load()
1310 s.fncache._load()
1311 timer(d)
1311 timer(d)
1312 fm.end()
1312 fm.end()
1313
1313
1314 @command(b'perffncachewrite', formatteropts)
1314 @command(b'perffncachewrite', formatteropts)
1315 def perffncachewrite(ui, repo, **opts):
1315 def perffncachewrite(ui, repo, **opts):
1316 opts = _byteskwargs(opts)
1316 opts = _byteskwargs(opts)
1317 timer, fm = gettimer(ui, opts)
1317 timer, fm = gettimer(ui, opts)
1318 s = repo.store
1318 s = repo.store
1319 lock = repo.lock()
1319 lock = repo.lock()
1320 s.fncache._load()
1320 s.fncache._load()
1321 tr = repo.transaction(b'perffncachewrite')
1321 tr = repo.transaction(b'perffncachewrite')
1322 tr.addbackup(b'fncache')
1322 tr.addbackup(b'fncache')
1323 def d():
1323 def d():
1324 s.fncache._dirty = True
1324 s.fncache._dirty = True
1325 s.fncache.write(tr)
1325 s.fncache.write(tr)
1326 timer(d)
1326 timer(d)
1327 tr.close()
1327 tr.close()
1328 lock.release()
1328 lock.release()
1329 fm.end()
1329 fm.end()
1330
1330
1331 @command(b'perffncacheencode', formatteropts)
1331 @command(b'perffncacheencode', formatteropts)
1332 def perffncacheencode(ui, repo, **opts):
1332 def perffncacheencode(ui, repo, **opts):
1333 opts = _byteskwargs(opts)
1333 opts = _byteskwargs(opts)
1334 timer, fm = gettimer(ui, opts)
1334 timer, fm = gettimer(ui, opts)
1335 s = repo.store
1335 s = repo.store
1336 s.fncache._load()
1336 s.fncache._load()
1337 def d():
1337 def d():
1338 for p in s.fncache.entries:
1338 for p in s.fncache.entries:
1339 s.encode(p)
1339 s.encode(p)
1340 timer(d)
1340 timer(d)
1341 fm.end()
1341 fm.end()
1342
1342
1343 def _bdiffworker(q, blocks, xdiff, ready, done):
1343 def _bdiffworker(q, blocks, xdiff, ready, done):
1344 while not done.is_set():
1344 while not done.is_set():
1345 pair = q.get()
1345 pair = q.get()
1346 while pair is not None:
1346 while pair is not None:
1347 if xdiff:
1347 if xdiff:
1348 mdiff.bdiff.xdiffblocks(*pair)
1348 mdiff.bdiff.xdiffblocks(*pair)
1349 elif blocks:
1349 elif blocks:
1350 mdiff.bdiff.blocks(*pair)
1350 mdiff.bdiff.blocks(*pair)
1351 else:
1351 else:
1352 mdiff.textdiff(*pair)
1352 mdiff.textdiff(*pair)
1353 q.task_done()
1353 q.task_done()
1354 pair = q.get()
1354 pair = q.get()
1355 q.task_done() # for the None one
1355 q.task_done() # for the None one
1356 with ready:
1356 with ready:
1357 ready.wait()
1357 ready.wait()
1358
1358
1359 def _manifestrevision(repo, mnode):
1359 def _manifestrevision(repo, mnode):
1360 ml = repo.manifestlog
1360 ml = repo.manifestlog
1361
1361
1362 if util.safehasattr(ml, b'getstorage'):
1362 if util.safehasattr(ml, b'getstorage'):
1363 store = ml.getstorage(b'')
1363 store = ml.getstorage(b'')
1364 else:
1364 else:
1365 store = ml._revlog
1365 store = ml._revlog
1366
1366
1367 return store.revision(mnode)
1367 return store.revision(mnode)
1368
1368
1369 @command(b'perfbdiff', revlogopts + formatteropts + [
1369 @command(b'perfbdiff', revlogopts + formatteropts + [
1370 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1370 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1371 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1371 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1372 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1372 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1373 (b'', b'blocks', False, b'test computing diffs into blocks'),
1373 (b'', b'blocks', False, b'test computing diffs into blocks'),
1374 (b'', b'xdiff', False, b'use xdiff algorithm'),
1374 (b'', b'xdiff', False, b'use xdiff algorithm'),
1375 ],
1375 ],
1376
1376
1377 b'-c|-m|FILE REV')
1377 b'-c|-m|FILE REV')
1378 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1378 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1379 """benchmark a bdiff between revisions
1379 """benchmark a bdiff between revisions
1380
1380
1381 By default, benchmark a bdiff between its delta parent and itself.
1381 By default, benchmark a bdiff between its delta parent and itself.
1382
1382
1383 With ``--count``, benchmark bdiffs between delta parents and self for N
1383 With ``--count``, benchmark bdiffs between delta parents and self for N
1384 revisions starting at the specified revision.
1384 revisions starting at the specified revision.
1385
1385
1386 With ``--alldata``, assume the requested revision is a changeset and
1386 With ``--alldata``, assume the requested revision is a changeset and
1387 measure bdiffs for all changes related to that changeset (manifest
1387 measure bdiffs for all changes related to that changeset (manifest
1388 and filelogs).
1388 and filelogs).
1389 """
1389 """
1390 opts = _byteskwargs(opts)
1390 opts = _byteskwargs(opts)
1391
1391
1392 if opts[b'xdiff'] and not opts[b'blocks']:
1392 if opts[b'xdiff'] and not opts[b'blocks']:
1393 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1393 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1394
1394
1395 if opts[b'alldata']:
1395 if opts[b'alldata']:
1396 opts[b'changelog'] = True
1396 opts[b'changelog'] = True
1397
1397
1398 if opts.get(b'changelog') or opts.get(b'manifest'):
1398 if opts.get(b'changelog') or opts.get(b'manifest'):
1399 file_, rev = None, file_
1399 file_, rev = None, file_
1400 elif rev is None:
1400 elif rev is None:
1401 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1401 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1402
1402
1403 blocks = opts[b'blocks']
1403 blocks = opts[b'blocks']
1404 xdiff = opts[b'xdiff']
1404 xdiff = opts[b'xdiff']
1405 textpairs = []
1405 textpairs = []
1406
1406
1407 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1407 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1408
1408
1409 startrev = r.rev(r.lookup(rev))
1409 startrev = r.rev(r.lookup(rev))
1410 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1410 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1411 if opts[b'alldata']:
1411 if opts[b'alldata']:
1412 # Load revisions associated with changeset.
1412 # Load revisions associated with changeset.
1413 ctx = repo[rev]
1413 ctx = repo[rev]
1414 mtext = _manifestrevision(repo, ctx.manifestnode())
1414 mtext = _manifestrevision(repo, ctx.manifestnode())
1415 for pctx in ctx.parents():
1415 for pctx in ctx.parents():
1416 pman = _manifestrevision(repo, pctx.manifestnode())
1416 pman = _manifestrevision(repo, pctx.manifestnode())
1417 textpairs.append((pman, mtext))
1417 textpairs.append((pman, mtext))
1418
1418
1419 # Load filelog revisions by iterating manifest delta.
1419 # Load filelog revisions by iterating manifest delta.
1420 man = ctx.manifest()
1420 man = ctx.manifest()
1421 pman = ctx.p1().manifest()
1421 pman = ctx.p1().manifest()
1422 for filename, change in pman.diff(man).items():
1422 for filename, change in pman.diff(man).items():
1423 fctx = repo.file(filename)
1423 fctx = repo.file(filename)
1424 f1 = fctx.revision(change[0][0] or -1)
1424 f1 = fctx.revision(change[0][0] or -1)
1425 f2 = fctx.revision(change[1][0] or -1)
1425 f2 = fctx.revision(change[1][0] or -1)
1426 textpairs.append((f1, f2))
1426 textpairs.append((f1, f2))
1427 else:
1427 else:
1428 dp = r.deltaparent(rev)
1428 dp = r.deltaparent(rev)
1429 textpairs.append((r.revision(dp), r.revision(rev)))
1429 textpairs.append((r.revision(dp), r.revision(rev)))
1430
1430
1431 withthreads = threads > 0
1431 withthreads = threads > 0
1432 if not withthreads:
1432 if not withthreads:
1433 def d():
1433 def d():
1434 for pair in textpairs:
1434 for pair in textpairs:
1435 if xdiff:
1435 if xdiff:
1436 mdiff.bdiff.xdiffblocks(*pair)
1436 mdiff.bdiff.xdiffblocks(*pair)
1437 elif blocks:
1437 elif blocks:
1438 mdiff.bdiff.blocks(*pair)
1438 mdiff.bdiff.blocks(*pair)
1439 else:
1439 else:
1440 mdiff.textdiff(*pair)
1440 mdiff.textdiff(*pair)
1441 else:
1441 else:
1442 q = queue()
1442 q = queue()
1443 for i in _xrange(threads):
1443 for i in _xrange(threads):
1444 q.put(None)
1444 q.put(None)
1445 ready = threading.Condition()
1445 ready = threading.Condition()
1446 done = threading.Event()
1446 done = threading.Event()
1447 for i in _xrange(threads):
1447 for i in _xrange(threads):
1448 threading.Thread(target=_bdiffworker,
1448 threading.Thread(target=_bdiffworker,
1449 args=(q, blocks, xdiff, ready, done)).start()
1449 args=(q, blocks, xdiff, ready, done)).start()
1450 q.join()
1450 q.join()
1451 def d():
1451 def d():
1452 for pair in textpairs:
1452 for pair in textpairs:
1453 q.put(pair)
1453 q.put(pair)
1454 for i in _xrange(threads):
1454 for i in _xrange(threads):
1455 q.put(None)
1455 q.put(None)
1456 with ready:
1456 with ready:
1457 ready.notify_all()
1457 ready.notify_all()
1458 q.join()
1458 q.join()
1459 timer, fm = gettimer(ui, opts)
1459 timer, fm = gettimer(ui, opts)
1460 timer(d)
1460 timer(d)
1461 fm.end()
1461 fm.end()
1462
1462
1463 if withthreads:
1463 if withthreads:
1464 done.set()
1464 done.set()
1465 for i in _xrange(threads):
1465 for i in _xrange(threads):
1466 q.put(None)
1466 q.put(None)
1467 with ready:
1467 with ready:
1468 ready.notify_all()
1468 ready.notify_all()
1469
1469
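# Usage sketch (illustrative; revision numbers are placeholders), following
# the `-c|-m|FILE REV` synopsis above:
#
#   $ hg perfbdiff -c 100 --count 50             # changelog deltas
#   $ hg perfbdiff -m 1000 --blocks --xdiff      # manifest, xdiff blocks
#   $ hg perfbdiff --alldata 100 --threads 4     # whole changeset, threaded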
1470 @command(b'perfunidiff', revlogopts + formatteropts + [
1470 @command(b'perfunidiff', revlogopts + formatteropts + [
1471 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1471 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1472 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1472 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1473 ], b'-c|-m|FILE REV')
1473 ], b'-c|-m|FILE REV')
1474 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1474 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1475 """benchmark a unified diff between revisions
1475 """benchmark a unified diff between revisions
1476
1476
1477 This doesn't include any copy tracing - it's just a unified diff
1477 This doesn't include any copy tracing - it's just a unified diff
1478 of the texts.
1478 of the texts.
1479
1479
1480 By default, benchmark a diff between its delta parent and itself.
1480 By default, benchmark a diff between its delta parent and itself.
1481
1481
1482 With ``--count``, benchmark diffs between delta parents and self for N
1482 With ``--count``, benchmark diffs between delta parents and self for N
1483 revisions starting at the specified revision.
1483 revisions starting at the specified revision.
1484
1484
1485 With ``--alldata``, assume the requested revision is a changeset and
1485 With ``--alldata``, assume the requested revision is a changeset and
1486 measure diffs for all changes related to that changeset (manifest
1486 measure diffs for all changes related to that changeset (manifest
1487 and filelogs).
1487 and filelogs).
1488 """
1488 """
1489 opts = _byteskwargs(opts)
1489 opts = _byteskwargs(opts)
1490 if opts[b'alldata']:
1490 if opts[b'alldata']:
1491 opts[b'changelog'] = True
1491 opts[b'changelog'] = True
1492
1492
1493 if opts.get(b'changelog') or opts.get(b'manifest'):
1493 if opts.get(b'changelog') or opts.get(b'manifest'):
1494 file_, rev = None, file_
1494 file_, rev = None, file_
1495 elif rev is None:
1495 elif rev is None:
1496 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1496 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1497
1497
1498 textpairs = []
1498 textpairs = []
1499
1499
1500 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1500 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1501
1501
1502 startrev = r.rev(r.lookup(rev))
1502 startrev = r.rev(r.lookup(rev))
1503 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1503 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1504 if opts[b'alldata']:
1504 if opts[b'alldata']:
1505 # Load revisions associated with changeset.
1505 # Load revisions associated with changeset.
1506 ctx = repo[rev]
1506 ctx = repo[rev]
1507 mtext = _manifestrevision(repo, ctx.manifestnode())
1507 mtext = _manifestrevision(repo, ctx.manifestnode())
1508 for pctx in ctx.parents():
1508 for pctx in ctx.parents():
1509 pman = _manifestrevision(repo, pctx.manifestnode())
1509 pman = _manifestrevision(repo, pctx.manifestnode())
1510 textpairs.append((pman, mtext))
1510 textpairs.append((pman, mtext))
1511
1511
1512 # Load filelog revisions by iterating manifest delta.
1512 # Load filelog revisions by iterating manifest delta.
1513 man = ctx.manifest()
1513 man = ctx.manifest()
1514 pman = ctx.p1().manifest()
1514 pman = ctx.p1().manifest()
1515 for filename, change in pman.diff(man).items():
1515 for filename, change in pman.diff(man).items():
1516 fctx = repo.file(filename)
1516 fctx = repo.file(filename)
1517 f1 = fctx.revision(change[0][0] or -1)
1517 f1 = fctx.revision(change[0][0] or -1)
1518 f2 = fctx.revision(change[1][0] or -1)
1518 f2 = fctx.revision(change[1][0] or -1)
1519 textpairs.append((f1, f2))
1519 textpairs.append((f1, f2))
1520 else:
1520 else:
1521 dp = r.deltaparent(rev)
1521 dp = r.deltaparent(rev)
1522 textpairs.append((r.revision(dp), r.revision(rev)))
1522 textpairs.append((r.revision(dp), r.revision(rev)))
1523
1523
1524 def d():
1524 def d():
1525 for left, right in textpairs:
1525 for left, right in textpairs:
1526 # The date strings don't matter, so we pass empty strings.
1526 # The date strings don't matter, so we pass empty strings.
1527 headerlines, hunks = mdiff.unidiff(
1527 headerlines, hunks = mdiff.unidiff(
1528 left, b'', right, b'', b'left', b'right', binary=False)
1528 left, b'', right, b'', b'left', b'right', binary=False)
1529 # consume iterators in roughly the way patch.py does
1529 # consume iterators in roughly the way patch.py does
1530 b'\n'.join(headerlines)
1530 b'\n'.join(headerlines)
1531 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1531 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1532 timer, fm = gettimer(ui, opts)
1532 timer, fm = gettimer(ui, opts)
1533 timer(d)
1533 timer(d)
1534 fm.end()
1534 fm.end()
1535
1535
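# Usage sketch (illustrative; revision numbers are placeholders), with the
# same argument conventions as perfbdiff above:
#
#   $ hg perfunidiff -c 100 --count 10
#   $ hg perfunidiff --alldata 100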
1536 @command(b'perfdiffwd', formatteropts)
1536 @command(b'perfdiffwd', formatteropts)
1537 def perfdiffwd(ui, repo, **opts):
1537 def perfdiffwd(ui, repo, **opts):
1538 """Profile diff of working directory changes"""
1538 """Profile diff of working directory changes"""
1539 opts = _byteskwargs(opts)
1539 opts = _byteskwargs(opts)
1540 timer, fm = gettimer(ui, opts)
1540 timer, fm = gettimer(ui, opts)
1541 options = {
1541 options = {
1542 'w': 'ignore_all_space',
1542 'w': 'ignore_all_space',
1543 'b': 'ignore_space_change',
1543 'b': 'ignore_space_change',
1544 'B': 'ignore_blank_lines',
1544 'B': 'ignore_blank_lines',
1545 }
1545 }
1546
1546
1547 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1547 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1548 opts = dict((options[c], b'1') for c in diffopt)
1548 opts = dict((options[c], b'1') for c in diffopt)
1549 def d():
1549 def d():
1550 ui.pushbuffer()
1550 ui.pushbuffer()
1551 commands.diff(ui, repo, **opts)
1551 commands.diff(ui, repo, **opts)
1552 ui.popbuffer()
1552 ui.popbuffer()
1553 diffopt = diffopt.encode('ascii')
1553 diffopt = diffopt.encode('ascii')
1554 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1554 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1555 timer(d, title=title)
1555 timer(d, title=title)
1556 fm.end()
1556 fm.end()
1557
1557
1558 @command(b'perfrevlogindex', revlogopts + formatteropts,
1558 @command(b'perfrevlogindex', revlogopts + formatteropts,
1559 b'-c|-m|FILE')
1559 b'-c|-m|FILE')
1560 def perfrevlogindex(ui, repo, file_=None, **opts):
1560 def perfrevlogindex(ui, repo, file_=None, **opts):
1561 """Benchmark operations against a revlog index.
1561 """Benchmark operations against a revlog index.
1562
1562
1563 This tests constructing a revlog instance, reading index data,
1563 This tests constructing a revlog instance, reading index data,
1564 parsing index data, and performing various operations related to
1564 parsing index data, and performing various operations related to
1565 index data.
1565 index data.
1566 """
1566 """
1567
1567
1568 opts = _byteskwargs(opts)
1568 opts = _byteskwargs(opts)
1569
1569
1570 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1570 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1571
1571
1572 opener = getattr(rl, 'opener') # trick linter
1572 opener = getattr(rl, 'opener') # trick linter
1573 indexfile = rl.indexfile
1573 indexfile = rl.indexfile
1574 data = opener.read(indexfile)
1574 data = opener.read(indexfile)
1575
1575
1576 header = struct.unpack(b'>I', data[0:4])[0]
1576 header = struct.unpack(b'>I', data[0:4])[0]
1577 version = header & 0xFFFF
1577 version = header & 0xFFFF
1578 if version == 1:
1578 if version == 1:
1579 revlogio = revlog.revlogio()
1579 revlogio = revlog.revlogio()
1580 inline = header & (1 << 16)
1580 inline = header & (1 << 16)
1581 else:
1581 else:
1582 raise error.Abort((b'unsupported revlog version: %d') % version)
1582 raise error.Abort((b'unsupported revlog version: %d') % version)
1583
1583
1584 rllen = len(rl)
1584 rllen = len(rl)
1585
1585
1586 node0 = rl.node(0)
1586 node0 = rl.node(0)
1587 node25 = rl.node(rllen // 4)
1587 node25 = rl.node(rllen // 4)
1588 node50 = rl.node(rllen // 2)
1588 node50 = rl.node(rllen // 2)
1589 node75 = rl.node(rllen // 4 * 3)
1589 node75 = rl.node(rllen // 4 * 3)
1590 node100 = rl.node(rllen - 1)
1590 node100 = rl.node(rllen - 1)
1591
1591
1592 allrevs = range(rllen)
1592 allrevs = range(rllen)
1593 allrevsrev = list(reversed(allrevs))
1593 allrevsrev = list(reversed(allrevs))
1594 allnodes = [rl.node(rev) for rev in range(rllen)]
1594 allnodes = [rl.node(rev) for rev in range(rllen)]
1595 allnodesrev = list(reversed(allnodes))
1595 allnodesrev = list(reversed(allnodes))
1596
1596
1597 def constructor():
1597 def constructor():
1598 revlog.revlog(opener, indexfile)
1598 revlog.revlog(opener, indexfile)
1599
1599
1600 def read():
1600 def read():
1601 with opener(indexfile) as fh:
1601 with opener(indexfile) as fh:
1602 fh.read()
1602 fh.read()
1603
1603
1604 def parseindex():
1604 def parseindex():
1605 revlogio.parseindex(data, inline)
1605 revlogio.parseindex(data, inline)
1606
1606
1607 def getentry(revornode):
1607 def getentry(revornode):
1608 index = revlogio.parseindex(data, inline)[0]
1608 index = revlogio.parseindex(data, inline)[0]
1609 index[revornode]
1609 index[revornode]
1610
1610
1611 def getentries(revs, count=1):
1611 def getentries(revs, count=1):
1612 index = revlogio.parseindex(data, inline)[0]
1612 index = revlogio.parseindex(data, inline)[0]
1613
1613
1614 for i in range(count):
1614 for i in range(count):
1615 for rev in revs:
1615 for rev in revs:
1616 index[rev]
1616 index[rev]
1617
1617
1618 def resolvenode(node):
1618 def resolvenode(node):
1619 nodemap = revlogio.parseindex(data, inline)[1]
1619 nodemap = revlogio.parseindex(data, inline)[1]
1620 # This only works for the C code.
1620 # This only works for the C code.
1621 if nodemap is None:
1621 if nodemap is None:
1622 return
1622 return
1623
1623
1624 try:
1624 try:
1625 nodemap[node]
1625 nodemap[node]
1626 except error.RevlogError:
1626 except error.RevlogError:
1627 pass
1627 pass
1628
1628
1629 def resolvenodes(nodes, count=1):
1629 def resolvenodes(nodes, count=1):
1630 nodemap = revlogio.parseindex(data, inline)[1]
1630 nodemap = revlogio.parseindex(data, inline)[1]
1631 if nodemap is None:
1631 if nodemap is None:
1632 return
1632 return
1633
1633
1634 for i in range(count):
1634 for i in range(count):
1635 for node in nodes:
1635 for node in nodes:
1636 try:
1636 try:
1637 nodemap[node]
1637 nodemap[node]
1638 except error.RevlogError:
1638 except error.RevlogError:
1639 pass
1639 pass
1640
1640
1641 benches = [
1641 benches = [
1642 (constructor, b'revlog constructor'),
1642 (constructor, b'revlog constructor'),
1643 (read, b'read'),
1643 (read, b'read'),
1644 (parseindex, b'create index object'),
1644 (parseindex, b'create index object'),
1645 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1645 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1646 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1646 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1647 (lambda: resolvenode(node0), b'look up node at rev 0'),
1647 (lambda: resolvenode(node0), b'look up node at rev 0'),
1648 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1648 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1649 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1649 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1650 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1650 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1651 (lambda: resolvenode(node100), b'look up node at tip'),
1651 (lambda: resolvenode(node100), b'look up node at tip'),
1652 # 2x variation is to measure caching impact.
1652 # 2x variation is to measure caching impact.
1653 (lambda: resolvenodes(allnodes),
1653 (lambda: resolvenodes(allnodes),
1654 b'look up all nodes (forward)'),
1654 b'look up all nodes (forward)'),
1655 (lambda: resolvenodes(allnodes, 2),
1655 (lambda: resolvenodes(allnodes, 2),
1656 b'look up all nodes 2x (forward)'),
1656 b'look up all nodes 2x (forward)'),
1657 (lambda: resolvenodes(allnodesrev),
1657 (lambda: resolvenodes(allnodesrev),
1658 b'look up all nodes (reverse)'),
1658 b'look up all nodes (reverse)'),
1659 (lambda: resolvenodes(allnodesrev, 2),
1659 (lambda: resolvenodes(allnodesrev, 2),
1660 b'look up all nodes 2x (reverse)'),
1660 b'look up all nodes 2x (reverse)'),
1661 (lambda: getentries(allrevs),
1661 (lambda: getentries(allrevs),
1662 b'retrieve all index entries (forward)'),
1662 b'retrieve all index entries (forward)'),
1663 (lambda: getentries(allrevs, 2),
1663 (lambda: getentries(allrevs, 2),
1664 b'retrieve all index entries 2x (forward)'),
1664 b'retrieve all index entries 2x (forward)'),
1665 (lambda: getentries(allrevsrev),
1665 (lambda: getentries(allrevsrev),
1666 b'retrieve all index entries (reverse)'),
1666 b'retrieve all index entries (reverse)'),
1667 (lambda: getentries(allrevsrev, 2),
1667 (lambda: getentries(allrevsrev, 2),
1668 b'retrieve all index entries 2x (reverse)'),
1668 b'retrieve all index entries 2x (reverse)'),
1669 ]
1669 ]
1670
1670
1671 for fn, title in benches:
1671 for fn, title in benches:
1672 timer, fm = gettimer(ui, opts)
1672 timer, fm = gettimer(ui, opts)
1673 timer(fn, title=title)
1673 timer(fn, title=title)
1674 fm.end()
1674 fm.end()
1675
1675
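# Usage sketch (illustrative): select the revlog with `-c` (changelog),
# `-m` (manifest) or an explicit tracked FILE.
#
#   $ hg perfrevlogindex -c
#   $ hg perfrevlogindex -m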
1676 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1676 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1677 [(b'd', b'dist', 100, b'distance between the revisions'),
1677 [(b'd', b'dist', 100, b'distance between the revisions'),
1678 (b's', b'startrev', 0, b'revision to start reading at'),
1678 (b's', b'startrev', 0, b'revision to start reading at'),
1679 (b'', b'reverse', False, b'read in reverse')],
1679 (b'', b'reverse', False, b'read in reverse')],
1680 b'-c|-m|FILE')
1680 b'-c|-m|FILE')
1681 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1681 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1682 **opts):
1682 **opts):
1683 """Benchmark reading a series of revisions from a revlog.
1683 """Benchmark reading a series of revisions from a revlog.
1684
1684
1685 By default, we read every ``-d/--dist`` revision from 0 to tip of
1685 By default, we read every ``-d/--dist`` revision from 0 to tip of
1686 the specified revlog.
1686 the specified revlog.
1687
1687
1688 The start revision can be defined via ``-s/--startrev``.
1688 The start revision can be defined via ``-s/--startrev``.
1689 """
1689 """
1690 opts = _byteskwargs(opts)
1690 opts = _byteskwargs(opts)
1691
1691
1692 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1692 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1693 rllen = getlen(ui)(rl)
1693 rllen = getlen(ui)(rl)
1694
1694
1695 if startrev < 0:
1695 if startrev < 0:
1696 startrev = rllen + startrev
1696 startrev = rllen + startrev
1697
1697
1698 def d():
1698 def d():
1699 rl.clearcaches()
1699 rl.clearcaches()
1700
1700
1701 beginrev = startrev
1701 beginrev = startrev
1702 endrev = rllen
1702 endrev = rllen
1703 dist = opts[b'dist']
1703 dist = opts[b'dist']
1704
1704
1705 if reverse:
1705 if reverse:
1706 beginrev, endrev = endrev - 1, beginrev - 1
1706 beginrev, endrev = endrev - 1, beginrev - 1
1707 dist = -1 * dist
1707 dist = -1 * dist
1708
1708
1709 for x in _xrange(beginrev, endrev, dist):
1709 for x in _xrange(beginrev, endrev, dist):
1710 # Old Mercurial versions don't support passing an int to revision().
1710 # Old Mercurial versions don't support passing an int to revision().
1711 n = rl.node(x)
1711 n = rl.node(x)
1712 rl.revision(n)
1712 rl.revision(n)
1713
1713
1714 timer, fm = gettimer(ui, opts)
1714 timer, fm = gettimer(ui, opts)
1715 timer(d)
1715 timer(d)
1716 fm.end()
1716 fm.end()
1717
1717
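# Usage sketch (illustrative; revision numbers are placeholders): `-d 1`
# reads every revision, larger distances sample the revlog more sparsely.
#
#   $ hg perfrevlogrevisions -c -d 1
#   $ hg perfrevlogrevisions -m -s 10000 --reverse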
1718 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1718 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1719 [(b's', b'startrev', 1000, b'revision to start writing at'),
1719 [(b's', b'startrev', 1000, b'revision to start writing at'),
1720 (b'', b'stoprev', -1, b'last revision to write'),
1720 (b'', b'stoprev', -1, b'last revision to write'),
1721 (b'', b'count', 3, b'number of runs to perform'),
1721 (b'', b'count', 3, b'number of runs to perform'),
1722 (b'', b'details', False, b'print timing for every revision tested'),
1722 (b'', b'details', False, b'print timing for every revision tested'),
1723 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1723 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1724 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1724 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1725 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1725 ],
1726 ],
1726 b'-c|-m|FILE')
1727 b'-c|-m|FILE')
1727 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1728 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1728 """Benchmark writing a series of revisions to a revlog.
1729 """Benchmark writing a series of revisions to a revlog.
1729
1730
1730 Possible source values are:
1731 Possible source values are:
1731 * `full`: add from a full text (default).
1732 * `full`: add from a full text (default).
1732 * `parent-1`: add from a delta to the first parent
1733 * `parent-1`: add from a delta to the first parent
1733 * `parent-2`: add from a delta to the second parent if it exists
1734 * `parent-2`: add from a delta to the second parent if it exists
1734 (use a delta from the first parent otherwise)
1735 (use a delta from the first parent otherwise)
1735 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1736 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1736 * `storage`: add from the existing precomputed deltas
1737 * `storage`: add from the existing precomputed deltas
1737 """
1738 """
1738 opts = _byteskwargs(opts)
1739 opts = _byteskwargs(opts)
1739
1740
1740 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1741 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1741 rllen = getlen(ui)(rl)
1742 rllen = getlen(ui)(rl)
1742 if startrev < 0:
1743 if startrev < 0:
1743 startrev = rllen + startrev
1744 startrev = rllen + startrev
1744 if stoprev < 0:
1745 if stoprev < 0:
1745 stoprev = rllen + stoprev
1746 stoprev = rllen + stoprev
1746
1747
1747 lazydeltabase = opts['lazydeltabase']
1748 lazydeltabase = opts['lazydeltabase']
1748 source = opts['source']
1749 source = opts['source']
1750 clearcaches = opts['clear_caches']
1749 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1751 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1750 b'storage')
1752 b'storage')
1751 if source not in validsource:
1753 if source not in validsource:
1752 raise error.Abort('invalid source type: %s' % source)
1754 raise error.Abort('invalid source type: %s' % source)
1753
1755
1754 ### actually gather results
1756 ### actually gather results
1755 count = opts['count']
1757 count = opts['count']
1756 if count <= 0:
1758 if count <= 0:
1757 raise error.Abort('invalid run count: %d' % count)
1759 raise error.Abort('invalid run count: %d' % count)
1758 allresults = []
1760 allresults = []
1759 for c in range(count):
1761 for c in range(count):
1760 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1762 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1761 lazydeltabase=lazydeltabase)
1763 lazydeltabase=lazydeltabase,
1764 clearcaches=clearcaches)
1762 allresults.append(timing)
1765 allresults.append(timing)
1763
1766
1764 ### consolidate the results in a single list
1767 ### consolidate the results in a single list
1765 results = []
1768 results = []
1766 for idx, (rev, t) in enumerate(allresults[0]):
1769 for idx, (rev, t) in enumerate(allresults[0]):
1767 ts = [t]
1770 ts = [t]
1768 for other in allresults[1:]:
1771 for other in allresults[1:]:
1769 orev, ot = other[idx]
1772 orev, ot = other[idx]
1770 assert orev == rev
1773 assert orev == rev
1771 ts.append(ot)
1774 ts.append(ot)
1772 results.append((rev, ts))
1775 results.append((rev, ts))
1773 resultcount = len(results)
1776 resultcount = len(results)
1774
1777
1775 ### Compute and display relevant statistics
1778 ### Compute and display relevant statistics
1776
1779
1777 # get a formatter
1780 # get a formatter
1778 fm = ui.formatter(b'perf', opts)
1781 fm = ui.formatter(b'perf', opts)
1779 displayall = ui.configbool(b"perf", b"all-timing", False)
1782 displayall = ui.configbool(b"perf", b"all-timing", False)
1780
1783
1781 # print individual details if requested
1784 # print individual details if requested
1782 if opts['details']:
1785 if opts['details']:
1783 for idx, item in enumerate(results, 1):
1786 for idx, item in enumerate(results, 1):
1784 rev, data = item
1787 rev, data = item
1785 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1788 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1786 formatone(fm, data, title=title, displayall=displayall)
1789 formatone(fm, data, title=title, displayall=displayall)
1787
1790
1788 # sorts results by median time
1791 # sorts results by median time
1789 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1792 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1790 # list of (name, index) to display
1793 # list of (name, index) to display
1791 relevants = [
1794 relevants = [
1792 ("min", 0),
1795 ("min", 0),
1793 ("10%", resultcount * 10 // 100),
1796 ("10%", resultcount * 10 // 100),
1794 ("25%", resultcount * 25 // 100),
1797 ("25%", resultcount * 25 // 100),
1795 ("50%", resultcount * 50 // 100),
1798 ("50%", resultcount * 50 // 100),
1796 ("75%", resultcount * 75 // 100),
1799 ("75%", resultcount * 75 // 100),
1797 ("90%", resultcount * 90 // 100),
1800 ("90%", resultcount * 90 // 100),
1798 ("95%", resultcount * 95 // 100),
1801 ("95%", resultcount * 95 // 100),
1799 ("99%", resultcount * 99 // 100),
1802 ("99%", resultcount * 99 // 100),
1800 ("99.9%", resultcount * 999 // 1000),
1803 ("99.9%", resultcount * 999 // 1000),
1801 ("99.99%", resultcount * 9999 // 10000),
1804 ("99.99%", resultcount * 9999 // 10000),
1802 ("99.999%", resultcount * 99999 // 100000),
1805 ("99.999%", resultcount * 99999 // 100000),
1803 ("max", -1),
1806 ("max", -1),
1804 ]
1807 ]
1805 if not ui.quiet:
1808 if not ui.quiet:
1806 for name, idx in relevants:
1809 for name, idx in relevants:
1807 data = results[idx]
1810 data = results[idx]
1808 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1811 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1809 formatone(fm, data[1], title=title, displayall=displayall)
1812 formatone(fm, data[1], title=title, displayall=displayall)
1810
1813
1811 # XXX summing that many float will not be very precise, we ignore this fact
1814 # XXX summing that many float will not be very precise, we ignore this fact
1812 # for now
1815 # for now
1813 totaltime = []
1816 totaltime = []
1814 for item in allresults:
1817 for item in allresults:
1815 totaltime.append((sum(x[1][0] for x in item),
1818 totaltime.append((sum(x[1][0] for x in item),
1816 sum(x[1][1] for x in item),
1819 sum(x[1][1] for x in item),
1817 sum(x[1][2] for x in item),)
1820 sum(x[1][2] for x in item),)
1818 )
1821 )
1819 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1822 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1820 displayall=displayall)
1823 displayall=displayall)
1821 fm.end()
1824 fm.end()
1822
1825
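Editor's note: the ``relevants`` table above samples the sorted per-revision timings by integer index. A minimal standalone sketch of that index arithmetic (plain Python, made-up timings, only a few of the percentiles):

    # Illustrative only: mirrors the floor-division indexing used above.
    def relevant_indexes(resultcount):
        return [
            ("min", 0),
            ("50%", resultcount * 50 // 100),
            ("90%", resultcount * 90 // 100),
            ("max", -1),
        ]

    timings = sorted([0.5, 0.1, 0.9, 0.3, 0.7])   # pretend per-revision times
    for name, idx in relevant_indexes(len(timings)):
        print('%-4s -> %.1f' % (name, timings[idx]))

With five samples this prints 0.1 for "min", the median 0.5 for "50%", and 0.9 for "90%" and "max"; the real command applies the same indexing over ``resultcount`` entries.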
1823 class _faketr(object):
1826 class _faketr(object):
1824 def add(s, x, y, z=None):
1827 def add(s, x, y, z=None):
1825 return None
1828 return None
1826
1829
1827 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1830 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1828 lazydeltabase=True):
1831 lazydeltabase=True, clearcaches=True):
1829 timings = []
1832 timings = []
1830 tr = _faketr()
1833 tr = _faketr()
1831 with _temprevlog(ui, orig, startrev) as dest:
1834 with _temprevlog(ui, orig, startrev) as dest:
1832 dest._lazydeltabase = lazydeltabase
1835 dest._lazydeltabase = lazydeltabase
1833 revs = list(orig.revs(startrev, stoprev))
1836 revs = list(orig.revs(startrev, stoprev))
1834 total = len(revs)
1837 total = len(revs)
1835 topic = 'adding'
1838 topic = 'adding'
1836 if runidx is not None:
1839 if runidx is not None:
1837 topic += ' (run #%d)' % runidx
1840 topic += ' (run #%d)' % runidx
1838 for idx, rev in enumerate(revs):
1841 for idx, rev in enumerate(revs):
1839 ui.progress(topic, idx, unit='revs', total=total)
1842 ui.progress(topic, idx, unit='revs', total=total)
1840 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1843 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1844 if clearcaches:
1845 dest.index.clearcaches()
1846 dest.clearcaches()
1841 with timeone() as r:
1847 with timeone() as r:
1842 dest.addrawrevision(*addargs, **addkwargs)
1848 dest.addrawrevision(*addargs, **addkwargs)
1843 timings.append((rev, r[0]))
1849 timings.append((rev, r[0]))
1844 ui.progress(topic, total, unit='revs', total=total)
1850 ui.progress(topic, total, unit='revs', total=total)
1845 ui.progress(topic, None, unit='revs', total=total)
1851 ui.progress(topic, None, unit='revs', total=total)
1846 return timings
1852 return timings
1847
1853
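Editor's note: the loop above is the point of this change. When ``clearcaches`` is set, both the index caches and the revlog-level caches of the destination are dropped before each timed ``addrawrevision`` call, so every write is measured cold. A minimal standalone sketch of that measure-cold pattern (``FakeRevlog`` below is a hypothetical stand-in, not Mercurial's revlog):

    import time

    class FakeRevlog(object):
        """Hypothetical stand-in for a revlog with an internal cache."""
        def __init__(self):
            self._cache = {}

        def clearcaches(self):
            self._cache.clear()

        def addrawrevision(self, rev):
            # pretend the write warms a cache that would skew later timings
            self._cache[rev] = 'data-%d' % rev

    def timewrites(revs, clearcaches=True):
        dest = FakeRevlog()
        timings = []
        for rev in revs:
            if clearcaches:
                dest.clearcaches()            # measure each write cold
            start = time.time()
            dest.addrawrevision(rev)
            timings.append((rev, time.time() - start))
        return timings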
1848 def _getrevisionseed(orig, rev, tr, source):
1854 def _getrevisionseed(orig, rev, tr, source):
1849 from mercurial.node import nullid
1855 from mercurial.node import nullid
1850
1856
1851 linkrev = orig.linkrev(rev)
1857 linkrev = orig.linkrev(rev)
1852 node = orig.node(rev)
1858 node = orig.node(rev)
1853 p1, p2 = orig.parents(node)
1859 p1, p2 = orig.parents(node)
1854 flags = orig.flags(rev)
1860 flags = orig.flags(rev)
1855 cachedelta = None
1861 cachedelta = None
1856 text = None
1862 text = None
1857
1863
1858 if source == b'full':
1864 if source == b'full':
1859 text = orig.revision(rev)
1865 text = orig.revision(rev)
1860 elif source == b'parent-1':
1866 elif source == b'parent-1':
1861 baserev = orig.rev(p1)
1867 baserev = orig.rev(p1)
1862 cachedelta = (baserev, orig.revdiff(p1, rev))
1868 cachedelta = (baserev, orig.revdiff(p1, rev))
1863 elif source == b'parent-2':
1869 elif source == b'parent-2':
1864 parent = p2
1870 parent = p2
1865 if p2 == nullid:
1871 if p2 == nullid:
1866 parent = p1
1872 parent = p1
1867 baserev = orig.rev(parent)
1873 baserev = orig.rev(parent)
1868 cachedelta = (baserev, orig.revdiff(parent, rev))
1874 cachedelta = (baserev, orig.revdiff(parent, rev))
1869 elif source == b'parent-smallest':
1875 elif source == b'parent-smallest':
1870 p1diff = orig.revdiff(p1, rev)
1876 p1diff = orig.revdiff(p1, rev)
1871 parent = p1
1877 parent = p1
1872 diff = p1diff
1878 diff = p1diff
1873 if p2 != nullid:
1879 if p2 != nullid:
1874 p2diff = orig.revdiff(p2, rev)
1880 p2diff = orig.revdiff(p2, rev)
1875 if len(p1diff) > len(p2diff):
1881 if len(p1diff) > len(p2diff):
1876 parent = p2
1882 parent = p2
1877 diff = p2diff
1883 diff = p2diff
1878 baserev = orig.rev(parent)
1884 baserev = orig.rev(parent)
1879 cachedelta = (baserev, diff)
1885 cachedelta = (baserev, diff)
1880 elif source == b'storage':
1886 elif source == b'storage':
1881 baserev = orig.deltaparent(rev)
1887 baserev = orig.deltaparent(rev)
1882 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1888 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1883
1889
1884 return ((text, tr, linkrev, p1, p2),
1890 return ((text, tr, linkrev, p1, p2),
1885 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1891 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1886
1892
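Editor's note: ``_getrevisionseed`` decides what to feed the destination for each ``source``: a fulltext, a delta against one parent, the smaller of the two parent deltas, or the delta already used in storage. A self-contained sketch of the ``parent-smallest`` comparison (byte strings stand in for real deltas):

    def smallestparentdelta(p1diff, p2diff=None):
        """Return ('p1' or 'p2', diff), keeping whichever candidate is shorter."""
        parent, diff = 'p1', p1diff
        if p2diff is not None and len(p1diff) > len(p2diff):
            parent, diff = 'p2', p2diff
        return parent, diff

    print(smallestparentdelta(b'a rather long diff against p1', b'tiny'))  # ('p2', b'tiny')
    print(smallestparentdelta(b'only one parent exists'))                  # ('p1', ...)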
1887 @contextlib.contextmanager
1893 @contextlib.contextmanager
1888 def _temprevlog(ui, orig, truncaterev):
1894 def _temprevlog(ui, orig, truncaterev):
1889 from mercurial import vfs as vfsmod
1895 from mercurial import vfs as vfsmod
1890
1896
1891 if orig._inline:
1897 if orig._inline:
1892 raise error.Abort('not supporting inline revlog (yet)')
1898 raise error.Abort('not supporting inline revlog (yet)')
1893
1899
1894 origindexpath = orig.opener.join(orig.indexfile)
1900 origindexpath = orig.opener.join(orig.indexfile)
1895 origdatapath = orig.opener.join(orig.datafile)
1901 origdatapath = orig.opener.join(orig.datafile)
1896 indexname = 'revlog.i'
1902 indexname = 'revlog.i'
1897 dataname = 'revlog.d'
1903 dataname = 'revlog.d'
1898
1904
1899 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1905 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1900 try:
1906 try:
1901 # copy the index and data files into a temporary directory
1907 # copy the index and data files into a temporary directory
1902 ui.debug('copying data in %s\n' % tmpdir)
1908 ui.debug('copying data in %s\n' % tmpdir)
1903 destindexpath = os.path.join(tmpdir, 'revlog.i')
1909 destindexpath = os.path.join(tmpdir, 'revlog.i')
1904 destdatapath = os.path.join(tmpdir, 'revlog.d')
1910 destdatapath = os.path.join(tmpdir, 'revlog.d')
1905 shutil.copyfile(origindexpath, destindexpath)
1911 shutil.copyfile(origindexpath, destindexpath)
1906 shutil.copyfile(origdatapath, destdatapath)
1912 shutil.copyfile(origdatapath, destdatapath)
1907
1913
1908 # remove the data we want to add again
1914 # remove the data we want to add again
1909 ui.debug('truncating data to be rewritten\n')
1915 ui.debug('truncating data to be rewritten\n')
1910 with open(destindexpath, 'ab') as index:
1916 with open(destindexpath, 'ab') as index:
1911 index.seek(0)
1917 index.seek(0)
1912 index.truncate(truncaterev * orig._io.size)
1918 index.truncate(truncaterev * orig._io.size)
1913 with open(destdatapath, 'ab') as data:
1919 with open(destdatapath, 'ab') as data:
1914 data.seek(0)
1920 data.seek(0)
1915 data.truncate(orig.start(truncaterev))
1921 data.truncate(orig.start(truncaterev))
1916
1922
1917 # instantiate a new revlog from the temporary copy
1923 # instantiate a new revlog from the temporary copy
1918 ui.debug('instantiating revlog from the truncated copy\n')
1924 ui.debug('instantiating revlog from the truncated copy\n')
1919 vfs = vfsmod.vfs(tmpdir)
1925 vfs = vfsmod.vfs(tmpdir)
1920 vfs.options = getattr(orig.opener, 'options', None)
1926 vfs.options = getattr(orig.opener, 'options', None)
1921
1927
1922 dest = revlog.revlog(vfs,
1928 dest = revlog.revlog(vfs,
1923 indexfile=indexname,
1929 indexfile=indexname,
1924 datafile=dataname)
1930 datafile=dataname)
1925 if dest._inline:
1931 if dest._inline:
1926 raise error.Abort('not supporting inline revlog (yet)')
1932 raise error.Abort('not supporting inline revlog (yet)')
1927 # make sure internals are initialized
1933 # make sure internals are initialized
1928 dest.revision(len(dest) - 1)
1934 dest.revision(len(dest) - 1)
1929 yield dest
1935 yield dest
1930 del dest, vfs
1936 del dest, vfs
1931 finally:
1937 finally:
1932 shutil.rmtree(tmpdir, True)
1938 shutil.rmtree(tmpdir, True)
1933
1939
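Editor's note: ``_temprevlog`` follows a copy, truncate, use, clean-up shape: the index and data files are copied into a scratch directory, cut back to ``truncaterev``, wrapped in a fresh revlog, and the directory is removed on exit. A minimal sketch of the same shape with ordinary files (the path and byte count are illustrative):

    import contextlib
    import os
    import shutil
    import tempfile

    @contextlib.contextmanager
    def tempcopy(path, keepbytes):
        """Yield a truncated temporary copy of ``path``, removed afterwards."""
        tmpdir = tempfile.mkdtemp(prefix='tmp-sketch-')
        try:
            dest = os.path.join(tmpdir, os.path.basename(path))
            shutil.copyfile(path, dest)
            with open(dest, 'ab') as fp:      # append mode keeps existing bytes
                fp.seek(0)
                fp.truncate(keepbytes)        # drop the tail we want to rewrite
            yield dest
        finally:
            shutil.rmtree(tmpdir, True)

Usage would look like ``with tempcopy('some.log', 1024) as scratch: ...``, mirroring how the command rewrites only the truncated tail.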
1934 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1940 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1935 [(b'e', b'engines', b'', b'compression engines to use'),
1941 [(b'e', b'engines', b'', b'compression engines to use'),
1936 (b's', b'startrev', 0, b'revision to start at')],
1942 (b's', b'startrev', 0, b'revision to start at')],
1937 b'-c|-m|FILE')
1943 b'-c|-m|FILE')
1938 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1944 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1939 """Benchmark operations on revlog chunks.
1945 """Benchmark operations on revlog chunks.
1940
1946
1941 Logically, each revlog is a collection of fulltext revisions. However,
1947 Logically, each revlog is a collection of fulltext revisions. However,
1942 stored within each revlog are "chunks" of possibly compressed data. This
1948 stored within each revlog are "chunks" of possibly compressed data. This
1943 data needs to be read and decompressed or compressed and written.
1949 data needs to be read and decompressed or compressed and written.
1944
1950
1945 This command measures the time it takes to read+decompress and recompress
1951 This command measures the time it takes to read+decompress and recompress
1946 chunks in a revlog. It effectively isolates I/O and compression performance.
1952 chunks in a revlog. It effectively isolates I/O and compression performance.
1947 For measurements of higher-level operations like resolving revisions,
1953 For measurements of higher-level operations like resolving revisions,
1948 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1954 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1949 """
1955 """
1950 opts = _byteskwargs(opts)
1956 opts = _byteskwargs(opts)
1951
1957
1952 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1958 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1953
1959
1954 # _chunkraw was renamed to _getsegmentforrevs.
1960 # _chunkraw was renamed to _getsegmentforrevs.
1955 try:
1961 try:
1956 segmentforrevs = rl._getsegmentforrevs
1962 segmentforrevs = rl._getsegmentforrevs
1957 except AttributeError:
1963 except AttributeError:
1958 segmentforrevs = rl._chunkraw
1964 segmentforrevs = rl._chunkraw
1959
1965
1960 # Verify engines argument.
1966 # Verify engines argument.
1961 if engines:
1967 if engines:
1962 engines = set(e.strip() for e in engines.split(b','))
1968 engines = set(e.strip() for e in engines.split(b','))
1963 for engine in engines:
1969 for engine in engines:
1964 try:
1970 try:
1965 util.compressionengines[engine]
1971 util.compressionengines[engine]
1966 except KeyError:
1972 except KeyError:
1967 raise error.Abort(b'unknown compression engine: %s' % engine)
1973 raise error.Abort(b'unknown compression engine: %s' % engine)
1968 else:
1974 else:
1969 engines = []
1975 engines = []
1970 for e in util.compengines:
1976 for e in util.compengines:
1971 engine = util.compengines[e]
1977 engine = util.compengines[e]
1972 try:
1978 try:
1973 if engine.available():
1979 if engine.available():
1974 engine.revlogcompressor().compress(b'dummy')
1980 engine.revlogcompressor().compress(b'dummy')
1975 engines.append(e)
1981 engines.append(e)
1976 except NotImplementedError:
1982 except NotImplementedError:
1977 pass
1983 pass
1978
1984
1979 revs = list(rl.revs(startrev, len(rl) - 1))
1985 revs = list(rl.revs(startrev, len(rl) - 1))
1980
1986
1981 def rlfh(rl):
1987 def rlfh(rl):
1982 if rl._inline:
1988 if rl._inline:
1983 return getsvfs(repo)(rl.indexfile)
1989 return getsvfs(repo)(rl.indexfile)
1984 else:
1990 else:
1985 return getsvfs(repo)(rl.datafile)
1991 return getsvfs(repo)(rl.datafile)
1986
1992
1987 def doread():
1993 def doread():
1988 rl.clearcaches()
1994 rl.clearcaches()
1989 for rev in revs:
1995 for rev in revs:
1990 segmentforrevs(rev, rev)
1996 segmentforrevs(rev, rev)
1991
1997
1992 def doreadcachedfh():
1998 def doreadcachedfh():
1993 rl.clearcaches()
1999 rl.clearcaches()
1994 fh = rlfh(rl)
2000 fh = rlfh(rl)
1995 for rev in revs:
2001 for rev in revs:
1996 segmentforrevs(rev, rev, df=fh)
2002 segmentforrevs(rev, rev, df=fh)
1997
2003
1998 def doreadbatch():
2004 def doreadbatch():
1999 rl.clearcaches()
2005 rl.clearcaches()
2000 segmentforrevs(revs[0], revs[-1])
2006 segmentforrevs(revs[0], revs[-1])
2001
2007
2002 def doreadbatchcachedfh():
2008 def doreadbatchcachedfh():
2003 rl.clearcaches()
2009 rl.clearcaches()
2004 fh = rlfh(rl)
2010 fh = rlfh(rl)
2005 segmentforrevs(revs[0], revs[-1], df=fh)
2011 segmentforrevs(revs[0], revs[-1], df=fh)
2006
2012
2007 def dochunk():
2013 def dochunk():
2008 rl.clearcaches()
2014 rl.clearcaches()
2009 fh = rlfh(rl)
2015 fh = rlfh(rl)
2010 for rev in revs:
2016 for rev in revs:
2011 rl._chunk(rev, df=fh)
2017 rl._chunk(rev, df=fh)
2012
2018
2013 chunks = [None]
2019 chunks = [None]
2014
2020
2015 def dochunkbatch():
2021 def dochunkbatch():
2016 rl.clearcaches()
2022 rl.clearcaches()
2017 fh = rlfh(rl)
2023 fh = rlfh(rl)
2018 # Save chunks as a side-effect.
2024 # Save chunks as a side-effect.
2019 chunks[0] = rl._chunks(revs, df=fh)
2025 chunks[0] = rl._chunks(revs, df=fh)
2020
2026
2021 def docompress(compressor):
2027 def docompress(compressor):
2022 rl.clearcaches()
2028 rl.clearcaches()
2023
2029
2024 try:
2030 try:
2025 # Swap in the requested compression engine.
2031 # Swap in the requested compression engine.
2026 oldcompressor = rl._compressor
2032 oldcompressor = rl._compressor
2027 rl._compressor = compressor
2033 rl._compressor = compressor
2028 for chunk in chunks[0]:
2034 for chunk in chunks[0]:
2029 rl.compress(chunk)
2035 rl.compress(chunk)
2030 finally:
2036 finally:
2031 rl._compressor = oldcompressor
2037 rl._compressor = oldcompressor
2032
2038
2033 benches = [
2039 benches = [
2034 (lambda: doread(), b'read'),
2040 (lambda: doread(), b'read'),
2035 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2041 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2036 (lambda: doreadbatch(), b'read batch'),
2042 (lambda: doreadbatch(), b'read batch'),
2037 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2043 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2038 (lambda: dochunk(), b'chunk'),
2044 (lambda: dochunk(), b'chunk'),
2039 (lambda: dochunkbatch(), b'chunk batch'),
2045 (lambda: dochunkbatch(), b'chunk batch'),
2040 ]
2046 ]
2041
2047
2042 for engine in sorted(engines):
2048 for engine in sorted(engines):
2043 compressor = util.compengines[engine].revlogcompressor()
2049 compressor = util.compengines[engine].revlogcompressor()
2044 benches.append((functools.partial(docompress, compressor),
2050 benches.append((functools.partial(docompress, compressor),
2045 b'compress w/ %s' % engine))
2051 b'compress w/ %s' % engine))
2046
2052
2047 for fn, title in benches:
2053 for fn, title in benches:
2048 timer, fm = gettimer(ui, opts)
2054 timer, fm = gettimer(ui, opts)
2049 timer(fn, title=title)
2055 timer(fn, title=title)
2050 fm.end()
2056 fm.end()
2051
2057
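Editor's note: each ``docompress`` run temporarily swaps the revlog's compressor and restores it in a ``finally`` block so one engine cannot leak into the next measurement. A generic sketch of that swap-and-restore idiom (the helper name is hypothetical, not Mercurial API):

    def withswappedattr(obj, name, replacement, fn):
        """Run ``fn()`` with ``obj.<name>`` temporarily replaced, then restore it."""
        saved = getattr(obj, name)
        setattr(obj, name, replacement)
        try:
            return fn()
        finally:
            setattr(obj, name, saved)

On the command-line side, specific engines can be selected with the ``--engines`` flag defined above, e.g. something like ``hg perfrevlogchunks -m --engines zlib``.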
2052 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2058 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2053 [(b'', b'cache', False, b'use caches instead of clearing')],
2059 [(b'', b'cache', False, b'use caches instead of clearing')],
2054 b'-c|-m|FILE REV')
2060 b'-c|-m|FILE REV')
2055 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2061 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2056 """Benchmark obtaining a revlog revision.
2062 """Benchmark obtaining a revlog revision.
2057
2063
2058 Obtaining a revlog revision consists of roughly the following steps:
2064 Obtaining a revlog revision consists of roughly the following steps:
2059
2065
2060 1. Compute the delta chain
2066 1. Compute the delta chain
2061 2. Slice the delta chain if applicable
2067 2. Slice the delta chain if applicable
2062 3. Obtain the raw chunks for that delta chain
2068 3. Obtain the raw chunks for that delta chain
2063 4. Decompress each raw chunk
2069 4. Decompress each raw chunk
2064 5. Apply binary patches to obtain fulltext
2070 5. Apply binary patches to obtain fulltext
2065 6. Verify hash of fulltext
2071 6. Verify hash of fulltext
2066
2072
2067 This command measures the time spent in each of these phases.
2073 This command measures the time spent in each of these phases.
2068 """
2074 """
2069 opts = _byteskwargs(opts)
2075 opts = _byteskwargs(opts)
2070
2076
2071 if opts.get(b'changelog') or opts.get(b'manifest'):
2077 if opts.get(b'changelog') or opts.get(b'manifest'):
2072 file_, rev = None, file_
2078 file_, rev = None, file_
2073 elif rev is None:
2079 elif rev is None:
2074 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2080 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2075
2081
2076 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2082 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2077
2083
2078 # _chunkraw was renamed to _getsegmentforrevs.
2084 # _chunkraw was renamed to _getsegmentforrevs.
2079 try:
2085 try:
2080 segmentforrevs = r._getsegmentforrevs
2086 segmentforrevs = r._getsegmentforrevs
2081 except AttributeError:
2087 except AttributeError:
2082 segmentforrevs = r._chunkraw
2088 segmentforrevs = r._chunkraw
2083
2089
2084 node = r.lookup(rev)
2090 node = r.lookup(rev)
2085 rev = r.rev(node)
2091 rev = r.rev(node)
2086
2092
2087 def getrawchunks(data, chain):
2093 def getrawchunks(data, chain):
2088 start = r.start
2094 start = r.start
2089 length = r.length
2095 length = r.length
2090 inline = r._inline
2096 inline = r._inline
2091 iosize = r._io.size
2097 iosize = r._io.size
2092 buffer = util.buffer
2098 buffer = util.buffer
2093
2099
2094 chunks = []
2100 chunks = []
2095 ladd = chunks.append
2101 ladd = chunks.append
2096 for idx, item in enumerate(chain):
2102 for idx, item in enumerate(chain):
2097 offset = start(item[0])
2103 offset = start(item[0])
2098 bits = data[idx]
2104 bits = data[idx]
2099 for rev in item:
2105 for rev in item:
2100 chunkstart = start(rev)
2106 chunkstart = start(rev)
2101 if inline:
2107 if inline:
2102 chunkstart += (rev + 1) * iosize
2108 chunkstart += (rev + 1) * iosize
2103 chunklength = length(rev)
2109 chunklength = length(rev)
2104 ladd(buffer(bits, chunkstart - offset, chunklength))
2110 ladd(buffer(bits, chunkstart - offset, chunklength))
2105
2111
2106 return chunks
2112 return chunks
2107
2113
2108 def dodeltachain(rev):
2114 def dodeltachain(rev):
2109 if not cache:
2115 if not cache:
2110 r.clearcaches()
2116 r.clearcaches()
2111 r._deltachain(rev)
2117 r._deltachain(rev)
2112
2118
2113 def doread(chain):
2119 def doread(chain):
2114 if not cache:
2120 if not cache:
2115 r.clearcaches()
2121 r.clearcaches()
2116 for item in slicedchain:
2122 for item in slicedchain:
2117 segmentforrevs(item[0], item[-1])
2123 segmentforrevs(item[0], item[-1])
2118
2124
2119 def doslice(r, chain, size):
2125 def doslice(r, chain, size):
2120 for s in slicechunk(r, chain, targetsize=size):
2126 for s in slicechunk(r, chain, targetsize=size):
2121 pass
2127 pass
2122
2128
2123 def dorawchunks(data, chain):
2129 def dorawchunks(data, chain):
2124 if not cache:
2130 if not cache:
2125 r.clearcaches()
2131 r.clearcaches()
2126 getrawchunks(data, chain)
2132 getrawchunks(data, chain)
2127
2133
2128 def dodecompress(chunks):
2134 def dodecompress(chunks):
2129 decomp = r.decompress
2135 decomp = r.decompress
2130 for chunk in chunks:
2136 for chunk in chunks:
2131 decomp(chunk)
2137 decomp(chunk)
2132
2138
2133 def dopatch(text, bins):
2139 def dopatch(text, bins):
2134 if not cache:
2140 if not cache:
2135 r.clearcaches()
2141 r.clearcaches()
2136 mdiff.patches(text, bins)
2142 mdiff.patches(text, bins)
2137
2143
2138 def dohash(text):
2144 def dohash(text):
2139 if not cache:
2145 if not cache:
2140 r.clearcaches()
2146 r.clearcaches()
2141 r.checkhash(text, node, rev=rev)
2147 r.checkhash(text, node, rev=rev)
2142
2148
2143 def dorevision():
2149 def dorevision():
2144 if not cache:
2150 if not cache:
2145 r.clearcaches()
2151 r.clearcaches()
2146 r.revision(node)
2152 r.revision(node)
2147
2153
2148 try:
2154 try:
2149 from mercurial.revlogutils.deltas import slicechunk
2155 from mercurial.revlogutils.deltas import slicechunk
2150 except ImportError:
2156 except ImportError:
2151 slicechunk = getattr(revlog, '_slicechunk', None)
2157 slicechunk = getattr(revlog, '_slicechunk', None)
2152
2158
2153 size = r.length(rev)
2159 size = r.length(rev)
2154 chain = r._deltachain(rev)[0]
2160 chain = r._deltachain(rev)[0]
2155 if not getattr(r, '_withsparseread', False):
2161 if not getattr(r, '_withsparseread', False):
2156 slicedchain = (chain,)
2162 slicedchain = (chain,)
2157 else:
2163 else:
2158 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2164 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2159 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2165 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2160 rawchunks = getrawchunks(data, slicedchain)
2166 rawchunks = getrawchunks(data, slicedchain)
2161 bins = r._chunks(chain)
2167 bins = r._chunks(chain)
2162 text = bytes(bins[0])
2168 text = bytes(bins[0])
2163 bins = bins[1:]
2169 bins = bins[1:]
2164 text = mdiff.patches(text, bins)
2170 text = mdiff.patches(text, bins)
2165
2171
2166 benches = [
2172 benches = [
2167 (lambda: dorevision(), b'full'),
2173 (lambda: dorevision(), b'full'),
2168 (lambda: dodeltachain(rev), b'deltachain'),
2174 (lambda: dodeltachain(rev), b'deltachain'),
2169 (lambda: doread(chain), b'read'),
2175 (lambda: doread(chain), b'read'),
2170 ]
2176 ]
2171
2177
2172 if getattr(r, '_withsparseread', False):
2178 if getattr(r, '_withsparseread', False):
2173 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2179 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2174 benches.append(slicing)
2180 benches.append(slicing)
2175
2181
2176 benches.extend([
2182 benches.extend([
2177 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2183 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2178 (lambda: dodecompress(rawchunks), b'decompress'),
2184 (lambda: dodecompress(rawchunks), b'decompress'),
2179 (lambda: dopatch(text, bins), b'patch'),
2185 (lambda: dopatch(text, bins), b'patch'),
2180 (lambda: dohash(text), b'hash'),
2186 (lambda: dohash(text), b'hash'),
2181 ])
2187 ])
2182
2188
2183 timer, fm = gettimer(ui, opts)
2189 timer, fm = gettimer(ui, opts)
2184 for fn, title in benches:
2190 for fn, title in benches:
2185 timer(fn, title=title)
2191 timer(fn, title=title)
2186 fm.end()
2192 fm.end()
2187
2193
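Editor's note: the ``do*`` helpers above each isolate one of the phases listed in the docstring, clearing the revlog caches first unless ``--cache`` is given. A standalone sketch of that time-each-phase-cold pattern:

    import time

    def timephases(phases, clearcaches=None):
        """Time each (name, fn) pair; run ``clearcaches`` before each phase if given."""
        out = []
        for name, fn in phases:
            if clearcaches is not None:
                clearcaches()                 # start every phase from a cold state
            start = time.time()
            fn()
            out.append((name, time.time() - start))
        return out

    # toy phases standing in for deltachain/read/decompress/patch/hash
    print(timephases([('sleepy', lambda: time.sleep(0.01)),
                      ('quick', lambda: sum(range(1000)))]))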
2188 @command(b'perfrevset',
2194 @command(b'perfrevset',
2189 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2195 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2190 (b'', b'contexts', False, b'obtain changectx for each revision')]
2196 (b'', b'contexts', False, b'obtain changectx for each revision')]
2191 + formatteropts, b"REVSET")
2197 + formatteropts, b"REVSET")
2192 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2198 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2193 """benchmark the execution time of a revset
2199 """benchmark the execution time of a revset
2194
2200
2195 Use the --clear option if you need to evaluate the impact of building the
2201 Use the --clear option if you need to evaluate the impact of building the
2196 volatile revision set caches on revset execution. The volatile caches hold
2202 volatile revision set caches on revset execution. The volatile caches hold
2197 filtered and obsolescence related data."""
2203 filtered and obsolescence related data."""
2198 opts = _byteskwargs(opts)
2204 opts = _byteskwargs(opts)
2199
2205
2200 timer, fm = gettimer(ui, opts)
2206 timer, fm = gettimer(ui, opts)
2201 def d():
2207 def d():
2202 if clear:
2208 if clear:
2203 repo.invalidatevolatilesets()
2209 repo.invalidatevolatilesets()
2204 if contexts:
2210 if contexts:
2205 for ctx in repo.set(expr): pass
2211 for ctx in repo.set(expr): pass
2206 else:
2212 else:
2207 for r in repo.revs(expr): pass
2213 for r in repo.revs(expr): pass
2208 timer(d)
2214 timer(d)
2209 fm.end()
2215 fm.end()
2210
2216
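Editor's note: the two flags map directly onto the closure above: ``--clear`` drops the volatile sets before every run and ``--contexts`` iterates ``repo.set()`` instead of ``repo.revs()``. Typical invocations might look like the following (the revset itself is only an example):

    $ hg perfrevset '0::tip'
    $ hg perfrevset '0::tip' --contexts --clear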
2211 @command(b'perfvolatilesets',
2217 @command(b'perfvolatilesets',
2212 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2218 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2213 ] + formatteropts)
2219 ] + formatteropts)
2214 def perfvolatilesets(ui, repo, *names, **opts):
2220 def perfvolatilesets(ui, repo, *names, **opts):
2215 """benchmark the computation of various volatile set
2221 """benchmark the computation of various volatile set
2216
2222
2217 Volatile set computes element related to filtering and obsolescence."""
2223 Volatile set computes element related to filtering and obsolescence."""
2218 opts = _byteskwargs(opts)
2224 opts = _byteskwargs(opts)
2219 timer, fm = gettimer(ui, opts)
2225 timer, fm = gettimer(ui, opts)
2220 repo = repo.unfiltered()
2226 repo = repo.unfiltered()
2221
2227
2222 def getobs(name):
2228 def getobs(name):
2223 def d():
2229 def d():
2224 repo.invalidatevolatilesets()
2230 repo.invalidatevolatilesets()
2225 if opts[b'clear_obsstore']:
2231 if opts[b'clear_obsstore']:
2226 clearfilecache(repo, b'obsstore')
2232 clearfilecache(repo, b'obsstore')
2227 obsolete.getrevs(repo, name)
2233 obsolete.getrevs(repo, name)
2228 return d
2234 return d
2229
2235
2230 allobs = sorted(obsolete.cachefuncs)
2236 allobs = sorted(obsolete.cachefuncs)
2231 if names:
2237 if names:
2232 allobs = [n for n in allobs if n in names]
2238 allobs = [n for n in allobs if n in names]
2233
2239
2234 for name in allobs:
2240 for name in allobs:
2235 timer(getobs(name), title=name)
2241 timer(getobs(name), title=name)
2236
2242
2237 def getfiltered(name):
2243 def getfiltered(name):
2238 def d():
2244 def d():
2239 repo.invalidatevolatilesets()
2245 repo.invalidatevolatilesets()
2240 if opts[b'clear_obsstore']:
2246 if opts[b'clear_obsstore']:
2241 clearfilecache(repo, b'obsstore')
2247 clearfilecache(repo, b'obsstore')
2242 repoview.filterrevs(repo, name)
2248 repoview.filterrevs(repo, name)
2243 return d
2249 return d
2244
2250
2245 allfilter = sorted(repoview.filtertable)
2251 allfilter = sorted(repoview.filtertable)
2246 if names:
2252 if names:
2247 allfilter = [n for n in allfilter if n in names]
2253 allfilter = [n for n in allfilter if n in names]
2248
2254
2249 for name in allfilter:
2255 for name in allfilter:
2250 timer(getfiltered(name), title=name)
2256 timer(getfiltered(name), title=name)
2251 fm.end()
2257 fm.end()
2252
2258
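Editor's note: as with ``perfrevset``, any names given on the command line restrict which volatile sets and repoview filters are benchmarked, and ``--clear-obsstore`` additionally drops the obsstore filecache before each run. For instance (the set name is only an example; valid names are whatever ``obsolete.cachefuncs`` and ``repoview.filtertable`` define in the running version):

    $ hg perfvolatilesets
    $ hg perfvolatilesets --clear-obsstore obsolete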
2253 @command(b'perfbranchmap',
2259 @command(b'perfbranchmap',
2254 [(b'f', b'full', False,
2260 [(b'f', b'full', False,
2255 b'Includes build time of subset'),
2261 b'Includes build time of subset'),
2256 (b'', b'clear-revbranch', False,
2262 (b'', b'clear-revbranch', False,
2257 b'purge the revbranch cache between computation'),
2263 b'purge the revbranch cache between computation'),
2258 ] + formatteropts)
2264 ] + formatteropts)
2259 def perfbranchmap(ui, repo, *filternames, **opts):
2265 def perfbranchmap(ui, repo, *filternames, **opts):
2260 """benchmark the update of a branchmap
2266 """benchmark the update of a branchmap
2261
2267
2262 This benchmarks the full repo.branchmap() call with read and write disabled
2268 This benchmarks the full repo.branchmap() call with read and write disabled
2263 """
2269 """
2264 opts = _byteskwargs(opts)
2270 opts = _byteskwargs(opts)
2265 full = opts.get(b"full", False)
2271 full = opts.get(b"full", False)
2266 clear_revbranch = opts.get(b"clear_revbranch", False)
2272 clear_revbranch = opts.get(b"clear_revbranch", False)
2267 timer, fm = gettimer(ui, opts)
2273 timer, fm = gettimer(ui, opts)
2268 def getbranchmap(filtername):
2274 def getbranchmap(filtername):
2269 """generate a benchmark function for the filtername"""
2275 """generate a benchmark function for the filtername"""
2270 if filtername is None:
2276 if filtername is None:
2271 view = repo
2277 view = repo
2272 else:
2278 else:
2273 view = repo.filtered(filtername)
2279 view = repo.filtered(filtername)
2274 def d():
2280 def d():
2275 if clear_revbranch:
2281 if clear_revbranch:
2276 repo.revbranchcache()._clear()
2282 repo.revbranchcache()._clear()
2277 if full:
2283 if full:
2278 view._branchcaches.clear()
2284 view._branchcaches.clear()
2279 else:
2285 else:
2280 view._branchcaches.pop(filtername, None)
2286 view._branchcaches.pop(filtername, None)
2281 view.branchmap()
2287 view.branchmap()
2282 return d
2288 return d
2283 # add filters from smaller subsets to bigger subsets
2289 # add filters from smaller subsets to bigger subsets
2284 possiblefilters = set(repoview.filtertable)
2290 possiblefilters = set(repoview.filtertable)
2285 if filternames:
2291 if filternames:
2286 possiblefilters &= set(filternames)
2292 possiblefilters &= set(filternames)
2287 subsettable = getbranchmapsubsettable()
2293 subsettable = getbranchmapsubsettable()
2288 allfilters = []
2294 allfilters = []
2289 while possiblefilters:
2295 while possiblefilters:
2290 for name in possiblefilters:
2296 for name in possiblefilters:
2291 subset = subsettable.get(name)
2297 subset = subsettable.get(name)
2292 if subset not in possiblefilters:
2298 if subset not in possiblefilters:
2293 break
2299 break
2294 else:
2300 else:
2295 assert False, b'subset cycle %s!' % possiblefilters
2301 assert False, b'subset cycle %s!' % possiblefilters
2296 allfilters.append(name)
2302 allfilters.append(name)
2297 possiblefilters.remove(name)
2303 possiblefilters.remove(name)
2298
2304
2299 # warm the cache
2305 # warm the cache
2300 if not full:
2306 if not full:
2301 for name in allfilters:
2307 for name in allfilters:
2302 repo.filtered(name).branchmap()
2308 repo.filtered(name).branchmap()
2303 if not filternames or b'unfiltered' in filternames:
2309 if not filternames or b'unfiltered' in filternames:
2304 # add unfiltered
2310 # add unfiltered
2305 allfilters.append(None)
2311 allfilters.append(None)
2306
2312
2307 branchcacheread = safeattrsetter(branchmap, b'read')
2313 branchcacheread = safeattrsetter(branchmap, b'read')
2308 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2314 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2309 branchcacheread.set(lambda repo: None)
2315 branchcacheread.set(lambda repo: None)
2310 branchcachewrite.set(lambda bc, repo: None)
2316 branchcachewrite.set(lambda bc, repo: None)
2311 try:
2317 try:
2312 for name in allfilters:
2318 for name in allfilters:
2313 printname = name
2319 printname = name
2314 if name is None:
2320 if name is None:
2315 printname = b'unfiltered'
2321 printname = b'unfiltered'
2316 timer(getbranchmap(name), title=str(printname))
2322 timer(getbranchmap(name), title=str(printname))
2317 finally:
2323 finally:
2318 branchcacheread.restore()
2324 branchcacheread.restore()
2319 branchcachewrite.restore()
2325 branchcachewrite.restore()
2320 fm.end()
2326 fm.end()
2321
2327
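Editor's note: the ``while possiblefilters`` loop above orders filters so that each one is timed only after the subset it builds on, aborting if the subset table ever forms a cycle. A standalone sketch of that ordering (the subset table below is hypothetical):

    def orderbysubset(names, subsettable):
        """Order ``names`` so every entry comes after the subset it depends on."""
        pending = set(names)
        ordered = []
        while pending:
            for name in pending:
                if subsettable.get(name) not in pending:
                    break                      # its dependency is already handled
            else:
                raise RuntimeError('subset cycle: %r' % pending)
            ordered.append(name)
            pending.remove(name)
        return ordered

    # hypothetical chain: 'visible' builds on 'served', which builds on 'immutable'
    print(orderbysubset(['visible', 'served', 'immutable'],
                        {'visible': 'served', 'served': 'immutable'}))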
2322 @command(b'perfbranchmapupdate', [
2328 @command(b'perfbranchmapupdate', [
2323 (b'', b'base', [], b'subset of revision to start from'),
2329 (b'', b'base', [], b'subset of revision to start from'),
2324 (b'', b'target', [], b'subset of revision to end with'),
2330 (b'', b'target', [], b'subset of revision to end with'),
2325 (b'', b'clear-caches', False, b'clear caches between each run')
2331 (b'', b'clear-caches', False, b'clear caches between each run')
2326 ] + formatteropts)
2332 ] + formatteropts)
2327 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2333 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2328 """benchmark branchmap update from for <base> revs to <target> revs
2334 """benchmark branchmap update from for <base> revs to <target> revs
2329
2335
2330 If `--clear-caches` is passed, the following items will be reset before
2336 If `--clear-caches` is passed, the following items will be reset before
2331 each update:
2337 each update:
2332 * the changelog instance and associated indexes
2338 * the changelog instance and associated indexes
2333 * the rev-branch-cache instance
2339 * the rev-branch-cache instance
2334
2340
2335 Examples:
2341 Examples:
2336
2342
2337 # update for the one last revision
2343 # update for the one last revision
2338 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2344 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2339
2345
2340 # update for a change coming with a new branch
2346 # update for a change coming with a new branch
2341 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2347 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2342 """
2348 """
2343 from mercurial import branchmap
2349 from mercurial import branchmap
2344 from mercurial import repoview
2350 from mercurial import repoview
2345 opts = _byteskwargs(opts)
2351 opts = _byteskwargs(opts)
2346 timer, fm = gettimer(ui, opts)
2352 timer, fm = gettimer(ui, opts)
2347 clearcaches = opts[b'clear_caches']
2353 clearcaches = opts[b'clear_caches']
2348 unfi = repo.unfiltered()
2354 unfi = repo.unfiltered()
2349 x = [None] # used to pass data between closure
2355 x = [None] # used to pass data between closure
2350
2356
2351 # we use a `list` here to avoid possible side effect from smartset
2357 # we use a `list` here to avoid possible side effect from smartset
2352 baserevs = list(scmutil.revrange(repo, base))
2358 baserevs = list(scmutil.revrange(repo, base))
2353 targetrevs = list(scmutil.revrange(repo, target))
2359 targetrevs = list(scmutil.revrange(repo, target))
2354 if not baserevs:
2360 if not baserevs:
2355 raise error.Abort(b'no revisions selected for --base')
2361 raise error.Abort(b'no revisions selected for --base')
2356 if not targetrevs:
2362 if not targetrevs:
2357 raise error.Abort(b'no revisions selected for --target')
2363 raise error.Abort(b'no revisions selected for --target')
2358
2364
2359 # make sure the target branchmap also contains the one in the base
2365 # make sure the target branchmap also contains the one in the base
2360 targetrevs = list(set(baserevs) | set(targetrevs))
2366 targetrevs = list(set(baserevs) | set(targetrevs))
2361 targetrevs.sort()
2367 targetrevs.sort()
2362
2368
2363 cl = repo.changelog
2369 cl = repo.changelog
2364 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2370 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2365 allbaserevs.sort()
2371 allbaserevs.sort()
2366 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2372 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2367
2373
2368 newrevs = list(alltargetrevs.difference(allbaserevs))
2374 newrevs = list(alltargetrevs.difference(allbaserevs))
2369 newrevs.sort()
2375 newrevs.sort()
2370
2376
2371 allrevs = frozenset(unfi.changelog.revs())
2377 allrevs = frozenset(unfi.changelog.revs())
2372 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2378 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2373 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2379 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2374
2380
2375 def basefilter(repo, visibilityexceptions=None):
2381 def basefilter(repo, visibilityexceptions=None):
2376 return basefilterrevs
2382 return basefilterrevs
2377
2383
2378 def targetfilter(repo, visibilityexceptions=None):
2384 def targetfilter(repo, visibilityexceptions=None):
2379 return targetfilterrevs
2385 return targetfilterrevs
2380
2386
2381 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2387 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2382 ui.status(msg % (len(allbaserevs), len(newrevs)))
2388 ui.status(msg % (len(allbaserevs), len(newrevs)))
2383 if targetfilterrevs:
2389 if targetfilterrevs:
2384 msg = b'(%d revisions still filtered)\n'
2390 msg = b'(%d revisions still filtered)\n'
2385 ui.status(msg % len(targetfilterrevs))
2391 ui.status(msg % len(targetfilterrevs))
2386
2392
2387 try:
2393 try:
2388 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2394 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2389 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2395 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2390
2396
2391 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2397 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2392 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2398 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2393
2399
2394 # try to find an existing branchmap to reuse
2400 # try to find an existing branchmap to reuse
2395 subsettable = getbranchmapsubsettable()
2401 subsettable = getbranchmapsubsettable()
2396 candidatefilter = subsettable.get(None)
2402 candidatefilter = subsettable.get(None)
2397 while candidatefilter is not None:
2403 while candidatefilter is not None:
2398 candidatebm = repo.filtered(candidatefilter).branchmap()
2404 candidatebm = repo.filtered(candidatefilter).branchmap()
2399 if candidatebm.validfor(baserepo):
2405 if candidatebm.validfor(baserepo):
2400 filtered = repoview.filterrevs(repo, candidatefilter)
2406 filtered = repoview.filterrevs(repo, candidatefilter)
2401 missing = [r for r in allbaserevs if r in filtered]
2407 missing = [r for r in allbaserevs if r in filtered]
2402 base = candidatebm.copy()
2408 base = candidatebm.copy()
2403 base.update(baserepo, missing)
2409 base.update(baserepo, missing)
2404 break
2410 break
2405 candidatefilter = subsettable.get(candidatefilter)
2411 candidatefilter = subsettable.get(candidatefilter)
2406 else:
2412 else:
2407 # no suitable subset was found
2413 # no suitable subset was found
2408 base = branchmap.branchcache()
2414 base = branchmap.branchcache()
2409 base.update(baserepo, allbaserevs)
2415 base.update(baserepo, allbaserevs)
2410
2416
2411 def setup():
2417 def setup():
2412 x[0] = base.copy()
2418 x[0] = base.copy()
2413 if clearcaches:
2419 if clearcaches:
2414 unfi._revbranchcache = None
2420 unfi._revbranchcache = None
2415 clearchangelog(repo)
2421 clearchangelog(repo)
2416
2422
2417 def bench():
2423 def bench():
2418 x[0].update(targetrepo, newrevs)
2424 x[0].update(targetrepo, newrevs)
2419
2425
2420 timer(bench, setup=setup)
2426 timer(bench, setup=setup)
2421 fm.end()
2427 fm.end()
2422 finally:
2428 finally:
2423 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2429 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2424 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2430 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2425
2431
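Editor's note: ``timer(bench, setup=setup)`` measures only the incremental ``update()`` call: ``setup`` re-copies the base branchmap (and, with ``--clear-caches``, resets the changelog and rev-branch-cache) before each run without being counted. A rough standalone sketch of setup-excluded timing, a simplification of what the perf timer does:

    import time

    def timewithsetup(bench, setup=None, runs=3):
        """Time ``bench`` alone; ``setup`` runs before each measurement, uncounted."""
        samples = []
        for _ in range(runs):
            if setup is not None:
                setup()                        # rebuild state outside the timed region
            start = time.time()
            bench()
            samples.append(time.time() - start)
        return min(samples), samples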
2426 @command(b'perfbranchmapload', [
2432 @command(b'perfbranchmapload', [
2427 (b'f', b'filter', b'', b'Specify repoview filter'),
2433 (b'f', b'filter', b'', b'Specify repoview filter'),
2428 (b'', b'list', False, b'List branchmap filter caches'),
2434 (b'', b'list', False, b'List branchmap filter caches'),
2429 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2435 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2430
2436
2431 ] + formatteropts)
2437 ] + formatteropts)
2432 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2438 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2433 """benchmark reading the branchmap"""
2439 """benchmark reading the branchmap"""
2434 opts = _byteskwargs(opts)
2440 opts = _byteskwargs(opts)
2435 clearrevlogs = opts[b'clear_revlogs']
2441 clearrevlogs = opts[b'clear_revlogs']
2436
2442
2437 if list:
2443 if list:
2438 for name, kind, st in repo.cachevfs.readdir(stat=True):
2444 for name, kind, st in repo.cachevfs.readdir(stat=True):
2439 if name.startswith(b'branch2'):
2445 if name.startswith(b'branch2'):
2440 filtername = name.partition(b'-')[2] or b'unfiltered'
2446 filtername = name.partition(b'-')[2] or b'unfiltered'
2441 ui.status(b'%s - %s\n'
2447 ui.status(b'%s - %s\n'
2442 % (filtername, util.bytecount(st.st_size)))
2448 % (filtername, util.bytecount(st.st_size)))
2443 return
2449 return
2444 if not filter:
2450 if not filter:
2445 filter = None
2451 filter = None
2446 subsettable = getbranchmapsubsettable()
2452 subsettable = getbranchmapsubsettable()
2447 if filter is None:
2453 if filter is None:
2448 repo = repo.unfiltered()
2454 repo = repo.unfiltered()
2449 else:
2455 else:
2450 repo = repoview.repoview(repo, filter)
2456 repo = repoview.repoview(repo, filter)
2451
2457
2452 repo.branchmap() # make sure we have a relevant, up to date branchmap
2458 repo.branchmap() # make sure we have a relevant, up to date branchmap
2453
2459
2454 currentfilter = filter
2460 currentfilter = filter
2455 # try once without the timer; the filter may not be cached
2461 # try once without the timer; the filter may not be cached
2456 while branchmap.read(repo) is None:
2462 while branchmap.read(repo) is None:
2457 currentfilter = subsettable.get(currentfilter)
2463 currentfilter = subsettable.get(currentfilter)
2458 if currentfilter is None:
2464 if currentfilter is None:
2459 raise error.Abort(b'No branchmap cached for %s repo'
2465 raise error.Abort(b'No branchmap cached for %s repo'
2460 % (filter or b'unfiltered'))
2466 % (filter or b'unfiltered'))
2461 repo = repo.filtered(currentfilter)
2467 repo = repo.filtered(currentfilter)
2462 timer, fm = gettimer(ui, opts)
2468 timer, fm = gettimer(ui, opts)
2463 def setup():
2469 def setup():
2464 if clearrevlogs:
2470 if clearrevlogs:
2465 clearchangelog(repo)
2471 clearchangelog(repo)
2466 def bench():
2472 def bench():
2467 branchmap.read(repo)
2473 branchmap.read(repo)
2468 timer(bench, setup=setup)
2474 timer(bench, setup=setup)
2469 fm.end()
2475 fm.end()
2470
2476
2471 @command(b'perfloadmarkers')
2477 @command(b'perfloadmarkers')
2472 def perfloadmarkers(ui, repo):
2478 def perfloadmarkers(ui, repo):
2473 """benchmark the time to parse the on-disk markers for a repo
2479 """benchmark the time to parse the on-disk markers for a repo
2474
2480
2475 Result is the number of markers in the repo."""
2481 Result is the number of markers in the repo."""
2476 timer, fm = gettimer(ui)
2482 timer, fm = gettimer(ui)
2477 svfs = getsvfs(repo)
2483 svfs = getsvfs(repo)
2478 timer(lambda: len(obsolete.obsstore(svfs)))
2484 timer(lambda: len(obsolete.obsstore(svfs)))
2479 fm.end()
2485 fm.end()
2480
2486
2481 @command(b'perflrucachedict', formatteropts +
2487 @command(b'perflrucachedict', formatteropts +
2482 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2488 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2483 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2489 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2484 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2490 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2485 (b'', b'size', 4, b'size of cache'),
2491 (b'', b'size', 4, b'size of cache'),
2486 (b'', b'gets', 10000, b'number of key lookups'),
2492 (b'', b'gets', 10000, b'number of key lookups'),
2487 (b'', b'sets', 10000, b'number of key sets'),
2493 (b'', b'sets', 10000, b'number of key sets'),
2488 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2494 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2489 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2495 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2490 norepo=True)
2496 norepo=True)
2491 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2497 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2492 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2498 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2493 opts = _byteskwargs(opts)
2499 opts = _byteskwargs(opts)
2494
2500
2495 def doinit():
2501 def doinit():
2496 for i in _xrange(10000):
2502 for i in _xrange(10000):
2497 util.lrucachedict(size)
2503 util.lrucachedict(size)
2498
2504
2499 costrange = list(range(mincost, maxcost + 1))
2505 costrange = list(range(mincost, maxcost + 1))
2500
2506
2501 values = []
2507 values = []
2502 for i in _xrange(size):
2508 for i in _xrange(size):
2503 values.append(random.randint(0, _maxint))
2509 values.append(random.randint(0, _maxint))
2504
2510
2505 # Get mode fills the cache and tests raw lookup performance with no
2511 # Get mode fills the cache and tests raw lookup performance with no
2506 # eviction.
2512 # eviction.
2507 getseq = []
2513 getseq = []
2508 for i in _xrange(gets):
2514 for i in _xrange(gets):
2509 getseq.append(random.choice(values))
2515 getseq.append(random.choice(values))
2510
2516
2511 def dogets():
2517 def dogets():
2512 d = util.lrucachedict(size)
2518 d = util.lrucachedict(size)
2513 for v in values:
2519 for v in values:
2514 d[v] = v
2520 d[v] = v
2515 for key in getseq:
2521 for key in getseq:
2516 value = d[key]
2522 value = d[key]
2517 value # silence pyflakes warning
2523 value # silence pyflakes warning
2518
2524
2519 def dogetscost():
2525 def dogetscost():
2520 d = util.lrucachedict(size, maxcost=costlimit)
2526 d = util.lrucachedict(size, maxcost=costlimit)
2521 for i, v in enumerate(values):
2527 for i, v in enumerate(values):
2522 d.insert(v, v, cost=costs[i])
2528 d.insert(v, v, cost=costs[i])
2523 for key in getseq:
2529 for key in getseq:
2524 try:
2530 try:
2525 value = d[key]
2531 value = d[key]
2526 value # silence pyflakes warning
2532 value # silence pyflakes warning
2527 except KeyError:
2533 except KeyError:
2528 pass
2534 pass
2529
2535
2530 # Set mode tests insertion speed with cache eviction.
2536 # Set mode tests insertion speed with cache eviction.
2531 setseq = []
2537 setseq = []
2532 costs = []
2538 costs = []
2533 for i in _xrange(sets):
2539 for i in _xrange(sets):
2534 setseq.append(random.randint(0, _maxint))
2540 setseq.append(random.randint(0, _maxint))
2535 costs.append(random.choice(costrange))
2541 costs.append(random.choice(costrange))
2536
2542
2537 def doinserts():
2543 def doinserts():
2538 d = util.lrucachedict(size)
2544 d = util.lrucachedict(size)
2539 for v in setseq:
2545 for v in setseq:
2540 d.insert(v, v)
2546 d.insert(v, v)
2541
2547
2542 def doinsertscost():
2548 def doinsertscost():
2543 d = util.lrucachedict(size, maxcost=costlimit)
2549 d = util.lrucachedict(size, maxcost=costlimit)
2544 for i, v in enumerate(setseq):
2550 for i, v in enumerate(setseq):
2545 d.insert(v, v, cost=costs[i])
2551 d.insert(v, v, cost=costs[i])
2546
2552
2547 def dosets():
2553 def dosets():
2548 d = util.lrucachedict(size)
2554 d = util.lrucachedict(size)
2549 for v in setseq:
2555 for v in setseq:
2550 d[v] = v
2556 d[v] = v
2551
2557
2552 # Mixed mode randomly performs gets and sets with eviction.
2558 # Mixed mode randomly performs gets and sets with eviction.
2553 mixedops = []
2559 mixedops = []
2554 for i in _xrange(mixed):
2560 for i in _xrange(mixed):
2555 r = random.randint(0, 100)
2561 r = random.randint(0, 100)
2556 if r < mixedgetfreq:
2562 if r < mixedgetfreq:
2557 op = 0
2563 op = 0
2558 else:
2564 else:
2559 op = 1
2565 op = 1
2560
2566
2561 mixedops.append((op,
2567 mixedops.append((op,
2562 random.randint(0, size * 2),
2568 random.randint(0, size * 2),
2563 random.choice(costrange)))
2569 random.choice(costrange)))
2564
2570
2565 def domixed():
2571 def domixed():
2566 d = util.lrucachedict(size)
2572 d = util.lrucachedict(size)
2567
2573
2568 for op, v, cost in mixedops:
2574 for op, v, cost in mixedops:
2569 if op == 0:
2575 if op == 0:
2570 try:
2576 try:
2571 d[v]
2577 d[v]
2572 except KeyError:
2578 except KeyError:
2573 pass
2579 pass
2574 else:
2580 else:
2575 d[v] = v
2581 d[v] = v
2576
2582
2577 def domixedcost():
2583 def domixedcost():
2578 d = util.lrucachedict(size, maxcost=costlimit)
2584 d = util.lrucachedict(size, maxcost=costlimit)
2579
2585
2580 for op, v, cost in mixedops:
2586 for op, v, cost in mixedops:
2581 if op == 0:
2587 if op == 0:
2582 try:
2588 try:
2583 d[v]
2589 d[v]
2584 except KeyError:
2590 except KeyError:
2585 pass
2591 pass
2586 else:
2592 else:
2587 d.insert(v, v, cost=cost)
2593 d.insert(v, v, cost=cost)
2588
2594
2589 benches = [
2595 benches = [
2590 (doinit, b'init'),
2596 (doinit, b'init'),
2591 ]
2597 ]
2592
2598
2593 if costlimit:
2599 if costlimit:
2594 benches.extend([
2600 benches.extend([
2595 (dogetscost, b'gets w/ cost limit'),
2601 (dogetscost, b'gets w/ cost limit'),
2596 (doinsertscost, b'inserts w/ cost limit'),
2602 (doinsertscost, b'inserts w/ cost limit'),
2597 (domixedcost, b'mixed w/ cost limit'),
2603 (domixedcost, b'mixed w/ cost limit'),
2598 ])
2604 ])
2599 else:
2605 else:
2600 benches.extend([
2606 benches.extend([
2601 (dogets, b'gets'),
2607 (dogets, b'gets'),
2602 (doinserts, b'inserts'),
2608 (doinserts, b'inserts'),
2603 (dosets, b'sets'),
2609 (dosets, b'sets'),
2604 (domixed, b'mixed')
2610 (domixed, b'mixed')
2605 ])
2611 ])
2606
2612
2607 for fn, title in benches:
2613 for fn, title in benches:
2608 timer, fm = gettimer(ui, opts)
2614 timer, fm = gettimer(ui, opts)
2609 timer(fn, title=title)
2615 timer(fn, title=title)
2610 fm.end()
2616 fm.end()
2611
2617
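Editor's note: what gets benchmarked here is Mercurial's ``util.lrucachedict``. For readers unfamiliar with the structure, a toy LRU mapping built on ``collections.OrderedDict`` shows the get/insert/evict behaviour that the gets/sets/mixed modes exercise (illustration only; not the real implementation, and without the cost accounting):

    import collections

    class TinyLRU(object):
        """Toy LRU mapping (illustration only, no cost limits)."""
        def __init__(self, size):
            self._size = size
            self._data = collections.OrderedDict()

        def __getitem__(self, key):
            value = self._data.pop(key)        # raises KeyError on a miss
            self._data[key] = value            # re-insert as most recently used
            return value

        def __setitem__(self, key, value):
            if key in self._data:
                self._data.pop(key)
            elif len(self._data) >= self._size:
                self._data.popitem(last=False)  # evict the least recently used
            self._data[key] = value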
2612 @command(b'perfwrite', formatteropts)
2618 @command(b'perfwrite', formatteropts)
2613 def perfwrite(ui, repo, **opts):
2619 def perfwrite(ui, repo, **opts):
2614 """microbenchmark ui.write
2620 """microbenchmark ui.write
2615 """
2621 """
2616 opts = _byteskwargs(opts)
2622 opts = _byteskwargs(opts)
2617
2623
2618 timer, fm = gettimer(ui, opts)
2624 timer, fm = gettimer(ui, opts)
2619 def write():
2625 def write():
2620 for i in range(100000):
2626 for i in range(100000):
2621 ui.write((b'Testing write performance\n'))
2627 ui.write((b'Testing write performance\n'))
2622 timer(write)
2628 timer(write)
2623 fm.end()
2629 fm.end()
2624
2630
2625 def uisetup(ui):
2631 def uisetup(ui):
2626 if (util.safehasattr(cmdutil, b'openrevlog') and
2632 if (util.safehasattr(cmdutil, b'openrevlog') and
2627 not util.safehasattr(commands, b'debugrevlogopts')):
2633 not util.safehasattr(commands, b'debugrevlogopts')):
2628 # for "historical portability":
2634 # for "historical portability":
2629 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2635 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2630 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2636 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2631 # openrevlog() should cause failure, because it has been
2637 # openrevlog() should cause failure, because it has been
2632 # available since 3.5 (or 49c583ca48c4).
2638 # available since 3.5 (or 49c583ca48c4).
2633 def openrevlog(orig, repo, cmd, file_, opts):
2639 def openrevlog(orig, repo, cmd, file_, opts):
2634 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2640 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2635 raise error.Abort(b"This version doesn't support --dir option",
2641 raise error.Abort(b"This version doesn't support --dir option",
2636 hint=b"use 3.5 or later")
2642 hint=b"use 3.5 or later")
2637 return orig(repo, cmd, file_, opts)
2643 return orig(repo, cmd, file_, opts)
2638 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2644 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2639
2645
2640 @command(b'perfprogress', formatteropts + [
2646 @command(b'perfprogress', formatteropts + [
2641 (b'', b'topic', b'topic', b'topic for progress messages'),
2647 (b'', b'topic', b'topic', b'topic for progress messages'),
2642 (b'c', b'total', 1000000, b'total value we are progressing to'),
2648 (b'c', b'total', 1000000, b'total value we are progressing to'),
2643 ], norepo=True)
2649 ], norepo=True)
2644 def perfprogress(ui, topic=None, total=None, **opts):
2650 def perfprogress(ui, topic=None, total=None, **opts):
2645 """printing of progress bars"""
2651 """printing of progress bars"""
2646 opts = _byteskwargs(opts)
2652 opts = _byteskwargs(opts)
2647
2653
2648 timer, fm = gettimer(ui, opts)
2654 timer, fm = gettimer(ui, opts)
2649
2655
2650 def doprogress():
2656 def doprogress():
2651 with ui.makeprogress(topic, total=total) as progress:
2657 with ui.makeprogress(topic, total=total) as progress:
2652 for i in pycompat.xrange(total):
2658 for i in pycompat.xrange(total):
2653 progress.increment()
2659 progress.increment()
2654
2660
2655 timer(doprogress)
2661 timer(doprogress)
2656 fm.end()
2662 fm.end()