##// END OF EJS Templates
perf: add two more missing b prefixes for Python 3...
Augie Fackler -
r40984:a314eafd default
parent child Browse files
Show More
@@ -1,2653 +1,2653 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 # "historical portability" policy of perf.py:
4 # "historical portability" policy of perf.py:
5 #
5 #
6 # We have to do:
6 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
10 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
11 # version as possible
12 #
12 #
13 # We have to do, if possible with reasonable cost:
13 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
14 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
15 # with early Mercurial
16 #
16 #
17 # We don't have to do:
17 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
18 # - make perf command for recent feature work correctly with early
19 # Mercurial
19 # Mercurial
20
20
21 from __future__ import absolute_import
21 from __future__ import absolute_import
22 import contextlib
22 import contextlib
23 import functools
23 import functools
24 import gc
24 import gc
25 import os
25 import os
26 import random
26 import random
27 import shutil
27 import shutil
28 import struct
28 import struct
29 import sys
29 import sys
30 import tempfile
30 import tempfile
31 import threading
31 import threading
32 import time
32 import time
33 from mercurial import (
33 from mercurial import (
34 changegroup,
34 changegroup,
35 cmdutil,
35 cmdutil,
36 commands,
36 commands,
37 copies,
37 copies,
38 error,
38 error,
39 extensions,
39 extensions,
40 hg,
40 hg,
41 mdiff,
41 mdiff,
42 merge,
42 merge,
43 revlog,
43 revlog,
44 util,
44 util,
45 )
45 )
46
46
47 # for "historical portability":
47 # for "historical portability":
48 # try to import modules separately (in dict order), and ignore
48 # try to import modules separately (in dict order), and ignore
49 # failure, because these aren't available with early Mercurial
49 # failure, because these aren't available with early Mercurial
50 try:
50 try:
51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 except ImportError:
52 except ImportError:
53 pass
53 pass
54 try:
54 try:
55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 except ImportError:
56 except ImportError:
57 pass
57 pass
58 try:
58 try:
59 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 dir(registrar) # forcibly load it
60 dir(registrar) # forcibly load it
61 except ImportError:
61 except ImportError:
62 registrar = None
62 registrar = None
63 try:
63 try:
64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 except ImportError:
65 except ImportError:
66 pass
66 pass
67 try:
67 try:
68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 except ImportError:
69 except ImportError:
70 pass
70 pass
71 try:
71 try:
72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 except ImportError:
73 except ImportError:
74 pass
74 pass
75
75
76
76
def identity(a):
    """Return *a* unchanged (fallback for missing pycompat helpers)."""
    return a
79
79
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    # pycompat (or one of its attributes) is missing: this must be a
    # py2-only Mercurial, so fall back to py2 equivalents.
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange
99
99
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue
109
109
# for "historical portability": maketemplater moved from cmdutil to
# logcmdutil; resolve whichever this Mercurial provides, else None.
try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
118
118
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (attr may be bytes)."""
    # _sysstr converts a bytes attribute name to native str for py3 getattr
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
126
126
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # fix: os.name is a native str on both py2 and py3; the former
    # b'nt' comparison could never match on py3, so Windows silently
    # fell through to the low-resolution time.time there.
    util.timer = time.clock
else:
    util.timer = time.time
136
136
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))
144
144
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))
156
156
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into a list of names."""
    return cmd.split(b"|")
164
164
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    # fix: getargspec() returns native-str argument names, so the former
    # b'norepo' membership test could never match on py3 (on py2 str and
    # bytes are the same type, so behavior there is unchanged).
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
192
192
# declare the config items this extension reads, when the running
# Mercurial is new enough to have the registrar/configitems machinery
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
212
212
def getlen(ui):
    """Return len(), or a constant-1 stub when config perf.stub is set."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len
217
217
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
283
283
def stub_timer(fm, func, setup=None, title=None):
    """Run ``setup`` (if any) then ``func`` exactly once, with no timing.

    Drop-in replacement for _timer when config perf.stub is set; fm and
    title are accepted only for signature compatibility.
    """
    if setup is not None:
        setup()
    func()
288
288
@contextlib.contextmanager
def timeone():
    """Context manager yielding a list that receives one timing tuple.

    After the block exits, the list holds a single
    (wall-clock, user-cpu, system-cpu) delta for the enclosed code.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times(): index 0 is user time, index 1 is system time
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
299
299
def _timer(fm, func, setup=None, title=None, displayall=False):
    """Repeatedly time ``func`` and write the results through ``fm``.

    Runs until 3 seconds have elapsed with at least 100 iterations, or
    10 seconds with at least 3 iterations, whichever comes first.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
320
320
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write timing statistics through formatter ``fm``.

    ``timings`` is a list of (wall, user, sys) tuples; it is sorted in
    place. Always reports the best entry; with ``displayall`` also
    reports max, avg and median.
    """

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
352
352
# utilities for historical portability

def getint(ui, section, name, default):
    """Read config ``section.name`` as an int; ``default`` when unset.

    Raises error.ConfigError when a value is present but not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))
366
366
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        # accessor pair closing over obj/name/origvalue
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
396
396
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    """Return the subsettable mapping from branchmap or repoview."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")
414
414
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')
425
425
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')
436
436
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    # fix: attribute and __dict__ keys are native str on py3, so the
    # former b'_tagscache' lookups could never match there (the cache was
    # silently never cleared); native '_tagscache' behaves identically on
    # py2 where str is bytes.
    if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))
465
465
# utilities to clear cache

def clearfilecache(obj, attrname):
    """Drop a @filecache'd property from ``obj`` (unfiltered if possible)."""
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
475
475
def clearchangelog(repo):
    """Invalidate the cached changelog on ``repo``."""
    if repo is not repo.unfiltered():
        # filtered repos keep their own changelog cache key/value pair
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
481
481
# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # time a full dirstate walk of the working directory
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
492
492
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # time annotating file f at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
500
500
@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    """benchmark computing repository status"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # sum the sizes of all status components so the result is consumed
    timer(lambda: sum(len(component)
                      for component in repo.status(
                          unknown=opts[b'unknown'])))
    fm.end()
512
512
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Save the quiet flag *before* entering the try block: in the
    # original the assignment lived inside the try, so a failure on
    # that line would make the finally clause raise NameError on
    # 'oldquiet' instead of restoring the ui state.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True  # suppress per-file output during the runs
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True  # never mutate the dirstate
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
526
526
def clearcaches(cl):
    """Clear a changelog's lookup caches across internal API versions."""
    # Modern Mercurial exposes an explicit cache-clearing method.
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
        return
    # Older versions: reset the node->rev map by hand.
    if util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
535
535
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark computing the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        len(cl.headrevs())
        # wipe caches inside the timed body so every iteration is cold
        clearcaches(cl)

    timer(run)
    fm.end()
546
546
@command(b'perftags', formatteropts +
         [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
          ])
def perftags(ui, repo, **opts):
    """benchmark reading the full tag set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            # start from cold revlog caches as well
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
565
565
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the repository heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        # drain the lazy ancestor generator
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
576
576
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests against the lazy ancestor set of heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        ancestorset = repo.changelog.ancestors(heads)
        # each containment test may force incremental ancestor walking
        for rev in revs:
            rev in ancestorset

    timer(run)
    fm.end()
589
589
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    # Convert opts keys to bytes like every sibling perf command does:
    # gettimer and the formatter helpers expect byte-string keys on
    # Python 3; this command was the only one skipping the conversion.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # open a fresh peer outside the timed body
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
604
604
@command(b'perfbookmarks', formatteropts +
         [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
          ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        # drop cached state so each run re-parses from disk
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
623
623
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # --- benchmark body factories -------------------------------------

    def makebench(fn):
        # re-open and re-parse the bundle header on every run, then
        # hand the unpacker to fn
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the whole bundle payload in size-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle machinery at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # --- bundle1 consumers --------------------------------------------

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # --- bundle2 consumers --------------------------------------------

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines apply to every bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
741
741
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def run():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # drain the chunk generator so the work actually happens
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(run)

    fm.end()
772
772
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark building the dirstate directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # force the dirstate to load outside the timer

    def run():
        dirstate.hasdir(b'a')
        # discard the derived dir map so the next run rebuilds it
        del dirstate._map._dirs

    timer(run)
    fm.end()
784
784
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark loading the dirstate from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime once before timing

    def run():
        # invalidate so the containment test triggers a full reload
        repo.dirstate.invalidate()
        b"a" in repo.dirstate

    timer(run)
    fm.end()
795
795
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark dirstate hasdir() with a cold directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # ensure the dirstate itself is loaded

    def run():
        repo.dirstate.hasdir(b"a")
        # drop the dir map so hasdir() recomputes it every run
        del repo.dirstate._map._dirs

    timer(run)
    fm.end()
806
806
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark building the dirstate file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside the timer

    def run():
        dirstate._map.filefoldmap.get(b'a')
        # invalidate the cached fold map for the next iteration
        del dirstate._map.filefoldmap

    timer(run)
    fm.end()
818
818
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark building the dirstate directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside the timer

    def run():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both derived maps so they are recomputed next run
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    timer(run)
    fm.end()
831
831
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # load before timing

    def run():
        ds._dirty = True  # force an actual write even with no changes
        ds.write(repo.currenttransaction())

    timer(run)
    fm.end()
843
843
@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    """benchmark computing the merge actions against a revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so
    # prime that cache
    wctx.dirty()

    def run():
        # acceptremote=True keeps prompts out of the middle of the
        # benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)

    timer(run)
    fm.end()
862
862
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    timer(lambda: copies.pathcopies(ctx1, ctx2))
    fm.end()
874
874
@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # also account for re-reading the phase roots from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
893
893
@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, outside the timed body
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        # count roots we know about whose remote phase is non-public
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)

    def run():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)

    timer(run)
    fm.end()
951
951
@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if manifest_rev:
        # *rev* names a manifest revlog entry directly
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial exposed the revlog directly
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    else:
        # *rev* names a changeset; use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()

    def run():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(run)
    fm.end()
987
987
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)

    timer(run)
    fm.end()
998
998
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # make sure the ignore matcher is rebuilt from scratch each run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1015
1015
@command(b'perfindex', [
    (b'', b'rev', b'', b'revision to be looked up (default tip)'),
] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark node lookup through a freshly built changelog index"""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    # After _byteskwargs() the opts keys are byte strings, so the lookup
    # key needs a b prefix to work on Python 3 (the str key raised
    # KeyError there).
    if opts[b'rev'] is None:
        n = repo[b"tip"].node()
    else:
        rev = scmutil.revsingle(repo, opts[b'rev'])
        n = repo[rev].node()

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1042
1042
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark hg startup time by running `hg version` in a subshell"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            # empty HGRCPATH so config loading does not skew the number
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()
1056
1056
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark reading the parents of the first N changelog nodes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
1073
1073
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of changeset ``x``"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()
1083
1083
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from the changelog entry"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        # changelog.read() returns a tuple; index 3 is the files list
        len(cl.read(x)[3])
    timer(d)
    fm.end()
1094
1094
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier with repo.lookup()"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1101
1101
@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    """benchmark applying a fixed pseudo-random edit sequence to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run benchmarks the identical edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1135
1135
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specs with scmutil.revrange"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1143
1143
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node->rev lookup on a freshly loaded changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        # drop caches so each run performs a cold lookup
        clearcaches(cl)
    timer(d)
    fm.end()
1157
1157
@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running ``hg log`` (optionally following renames)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer output so terminal I/O does not pollute the measurement
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()
1171
1171
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
1186
1186
@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    # render into the void so output cost does not skew the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1220
1220
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
         ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # restrict to merges: only they have two parents to trace between
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # NOTE(review): ``data`` mixes bytes keys (b'source', ...)
                # with str keys ('time', 'nbrenamedfiles') below; on
                # Python 3 these are distinct keys — confirm against the
                # formatter's expectations before changing.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
1296
1296
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
1303
1303
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
1313
1313
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        # force a rewrite on every run
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
1330
1330
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark store-path encoding of every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
1342
1342
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for threaded perfbdiff: diff text pairs pulled from ``q``

    A ``None`` item marks the end of a batch; the worker then parks on
    ``ready`` until the driver notifies, and exits once ``done`` is set.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()
1358
1358
def _manifestrevision(repo, mnode):
    """return the raw manifest revision text for node ``mnode``

    Prefers the modern ``manifestlog.getstorage()`` API and falls back to
    the legacy ``_revlog`` attribute on older Mercurial versions.
    """
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)
1368
1368
@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        # spin up workers first; the initial None items and q.join() let
        # them reach their parked state before timing starts
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # wake and retire the worker threads
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
1469
1469
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1535
1535
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # use a distinct name so we don't shadow this function's ``opts``
        diffkwargs = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffkwargs)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
1557
1557
@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # The first four bytes encode flags (high 16 bits) and version (low 16).
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version != 1:
        raise error.Abort((b'unsupported revlog version: %d') % version)
    revlogio = revlog.revlogio()
    inline = header & (1 << 16)

    rllen = len(rl)

    # Sample nodes at fixed points across the revlog for lookup benchmarks.
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # Time instantiating a revlog from the already-open opener.
        revlog.revlog(opener, indexfile)

    def read():
        # Time raw I/O on the index file alone.
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for _ in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for _ in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1675
1675
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # A negative start revision counts back from the tip.
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # Walk from the tip back toward the start revision instead.
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1717
1717
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          # BUG FIX: help text was a copy-paste of --stoprev's.
          (b'', b'count', 3, b'number of timed runs to perform'),
          (b'', b'details', False, b'print timing for every revisions tested'),
          (b'', b'source', b'full', b'the kind of data feed in the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # Negative revisions count back from the tip.
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    # opts went through _byteskwargs, so every key is bytes: the lookups
    # below must use b'' keys or they KeyError on Python 3.
    lazydeltabase = opts[b'lazydeltabase']
    source = opts[b'source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort(b'invalid source type: %s' % source)

    ### actually gather results
    count = opts[b'count']
    if count <= 0:
        raise error.Abort(b'invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults is a list of runs; each run is a list of (rev, timing).
    # Regroup per revision: results becomes [(rev, [t_run1, t_run2, ...])].
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts[b'details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the 50th percentile was computed with 70, not 50.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
        )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
1819
1819
1820 class _faketr(object):
1820 class _faketr(object):
1821 def add(s, x, y, z=None):
1821 def add(s, x, y, z=None):
1822 return None
1822 return None
1823
1823
def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    """Replay revisions [startrev, stoprev] of `orig` into a throwaway
    revlog, timing each individual addrawrevision() call.

    Returns a list of (rev, timing) pairs.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit='revs', total=total)
            # Prepare everything outside of the timed region so only the
            # actual write is measured.
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        # Report completion, then clear the progress bar.
        ui.progress(topic, total, unit='revs', total=total)
        ui.progress(topic, None, unit='revs', total=total)
    return timings
1844
1844
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) for addrawrevision() that re-adds `rev`
    from `orig`, feeding it either a full text or a cached delta
    depending on `source` (full/parent-1/parent-2/parent-smallest/storage).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # Fall back to p1 when there is no second parent.
        parent = p1 if p2 == nullid else p2
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # Pick whichever parent yields the smaller delta.
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # Reuse the delta exactly as it is stored in the source revlog.
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1883
1883
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of revlog `orig`,
    truncated just before `truncaterev`, backed by a temporary directory
    that is removed on exit."""
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # 'ab' keeps existing content; we only seek back and truncate.
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # ignore_errors=True: best-effort cleanup of the scratch directory.
        shutil.rmtree(tmpdir, True)
1930
1930
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit selection: probe every registered engine that both
        # claims availability and actually implements a revlog compressor.
        engines = []
        for name in util.compengines:
            engine = util.compengines[name]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(name)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Inline revlogs keep data in the index file.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (doread, b'read'),
        (doreadcachedfh, b'read w/ reused fd'),
        (doreadbatch, b'read batch'),
        (doreadbatchcachedfh, b'read batch w/ reused fd'),
        (dochunk, b'chunk'),
        (dochunkbatch, b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2048
2048
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m there is no FILE argument: the sole positional argument
    # is actually the revision to benchmark.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice the pre-read segment data back into the raw (still
        # compressed) per-revision chunks, mirroring what the revlog's own
        # chunk extraction does. Bound lookups are hoisted to locals since
        # this runs inside timed benchmarks.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave one index entry (iosize
                    # bytes) before each data chunk; skip past them
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # Each do* helper below isolates one phase of revision reconstruction;
    # with --cache they reuse warm caches, otherwise caches are cleared so
    # every run starts cold.
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # consume the generator; slicing is the work being measured
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved into mercurial.revlogutils.deltas at some point;
    # fall back to the old private location on older Mercurial.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the inputs of every phase once, so each benchmark below
    # only measures its own phase and not the preceding ones.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # slicing is only meaningful when sparse reads are enabled
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
2184
2184
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clean option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # optionally drop the volatile (filtering/obsolescence) caches so
        # each run pays the cache rebuild cost
        if clear:
            repo.invalidatevolatilesets()
        # --contexts walks full changectx objects; otherwise bare rev numbers
        revisions = repo.set(expr) if contexts else repo.revs(expr)
        for unused in revisions:
            pass

    timer(bench)
    fm.end()
2207
2207
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
         ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, setname):
        # Build a benchmark closure that recomputes the named volatile set
        # from scratch on every run.
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)
        return run

    def selected(candidates):
        # Honor an explicit list of set names given on the command line;
        # filtering a sorted list keeps the sorted order.
        if names:
            return [n for n in candidates if n in names]
        return candidates

    # obsolescence-related sets
    for name in selected(sorted(obsolete.cachefuncs)):
        timer(makebench(obsolete.getrevs, name), title=name)

    # filtered-revision sets
    for name in selected(sorted(repoview.filtertable)):
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
2249
2249
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            # None means the unfiltered repository
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measures a full rebuild
                view._branchcaches.clear()
            else:
                # drop only this filter's entry: measures an incremental
                # update on top of the cached subset branchmaps
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # Order filters so each one is benchmarked only after the subset it
    # builds upon: repeatedly pick a filter whose subset is no longer
    # pending. The for/else asserts we never deadlock on a subset cycle.
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap read/write so every run measures only the
    # in-memory computation; restored in the finally block below.
    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
2318
2318
@command(b'perfbranchmapupdate', [
    (b'', b'base', [], b'subset of revision to start from'),
    (b'', b'target', [], b'subset of revision to end with'),
    (b'', b'clear-caches', False, b'clear cache between each runs')
] + formatteropts)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None] # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the timed update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # Two ad-hoc repoview filters exposing exactly the base and target
    # revision subsets; registered temporarily in filtertable below.
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        # Walk the filter-subset chain looking for a cached branchmap that
        # is valid for the base view; the while/else falls through to a
        # from-scratch computation when no candidate matches.
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap so the
            # timed update always does the same amount of work
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always remove the temporary filters from the global table
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2422
2422
@command(b'perfbranchmapload', [
    (b'f', b'filter', b'', b'Specify repoview filter'),
    # fix typo in user-visible help text: "brachmap" -> "branchmap"
    (b'', b'list', False, b'List branchmap filter caches'),
    (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),

] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: enumerate the on-disk branchmap cache files
        # (cache/branch2*) with their sizes, then exit
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap() # make sure we have a relevant, up to date branchmap

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while branchmap.read(repo) is None:
        # walk up the subset chain until a filter level with an on-disk
        # cache is found; abort if none exists
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(b'No branchmap cached for %s repo'
                              % (filter or b'unfiltered'))
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)
    def setup():
        # optionally drop the changelog so each read includes revlog
        # reload cost
        if clearrevlogs:
            clearchangelog(repo)
    def bench():
        branchmap.read(repo)
    timer(bench, setup=setup)
    fm.end()
2467
2467
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def countmarkers():
        # instantiating obsstore parses the on-disk markers; len() forces it
        return len(obsolete.obsstore(svfs))

    timer(countmarkers)
    fm.end()
2477
2477
2478 @command(b'perflrucachedict', formatteropts +
2478 @command(b'perflrucachedict', formatteropts +
2479 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2479 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2480 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2480 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2481 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2481 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2482 (b'', b'size', 4, b'size of cache'),
2482 (b'', b'size', 4, b'size of cache'),
2483 (b'', b'gets', 10000, b'number of key lookups'),
2483 (b'', b'gets', 10000, b'number of key lookups'),
2484 (b'', b'sets', 10000, b'number of key sets'),
2484 (b'', b'sets', 10000, b'number of key sets'),
2485 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2485 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2486 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2486 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2487 norepo=True)
2487 norepo=True)
2488 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2488 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2489 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2489 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2490 opts = _byteskwargs(opts)
2490 opts = _byteskwargs(opts)
2491
2491
2492 def doinit():
2492 def doinit():
2493 for i in _xrange(10000):
2493 for i in _xrange(10000):
2494 util.lrucachedict(size)
2494 util.lrucachedict(size)
2495
2495
2496 costrange = list(range(mincost, maxcost + 1))
2496 costrange = list(range(mincost, maxcost + 1))
2497
2497
2498 values = []
2498 values = []
2499 for i in _xrange(size):
2499 for i in _xrange(size):
2500 values.append(random.randint(0, _maxint))
2500 values.append(random.randint(0, _maxint))
2501
2501
2502 # Get mode fills the cache and tests raw lookup performance with no
2502 # Get mode fills the cache and tests raw lookup performance with no
2503 # eviction.
2503 # eviction.
2504 getseq = []
2504 getseq = []
2505 for i in _xrange(gets):
2505 for i in _xrange(gets):
2506 getseq.append(random.choice(values))
2506 getseq.append(random.choice(values))
2507
2507
2508 def dogets():
2508 def dogets():
2509 d = util.lrucachedict(size)
2509 d = util.lrucachedict(size)
2510 for v in values:
2510 for v in values:
2511 d[v] = v
2511 d[v] = v
2512 for key in getseq:
2512 for key in getseq:
2513 value = d[key]
2513 value = d[key]
2514 value # silence pyflakes warning
2514 value # silence pyflakes warning
2515
2515
2516 def dogetscost():
2516 def dogetscost():
2517 d = util.lrucachedict(size, maxcost=costlimit)
2517 d = util.lrucachedict(size, maxcost=costlimit)
2518 for i, v in enumerate(values):
2518 for i, v in enumerate(values):
2519 d.insert(v, v, cost=costs[i])
2519 d.insert(v, v, cost=costs[i])
2520 for key in getseq:
2520 for key in getseq:
2521 try:
2521 try:
2522 value = d[key]
2522 value = d[key]
2523 value # silence pyflakes warning
2523 value # silence pyflakes warning
2524 except KeyError:
2524 except KeyError:
2525 pass
2525 pass
2526
2526
2527 # Set mode tests insertion speed with cache eviction.
2527 # Set mode tests insertion speed with cache eviction.
2528 setseq = []
2528 setseq = []
2529 costs = []
2529 costs = []
2530 for i in _xrange(sets):
2530 for i in _xrange(sets):
2531 setseq.append(random.randint(0, _maxint))
2531 setseq.append(random.randint(0, _maxint))
2532 costs.append(random.choice(costrange))
2532 costs.append(random.choice(costrange))
2533
2533
2534 def doinserts():
2534 def doinserts():
2535 d = util.lrucachedict(size)
2535 d = util.lrucachedict(size)
2536 for v in setseq:
2536 for v in setseq:
2537 d.insert(v, v)
2537 d.insert(v, v)
2538
2538
2539 def doinsertscost():
2539 def doinsertscost():
2540 d = util.lrucachedict(size, maxcost=costlimit)
2540 d = util.lrucachedict(size, maxcost=costlimit)
2541 for i, v in enumerate(setseq):
2541 for i, v in enumerate(setseq):
2542 d.insert(v, v, cost=costs[i])
2542 d.insert(v, v, cost=costs[i])
2543
2543
2544 def dosets():
2544 def dosets():
2545 d = util.lrucachedict(size)
2545 d = util.lrucachedict(size)
2546 for v in setseq:
2546 for v in setseq:
2547 d[v] = v
2547 d[v] = v
2548
2548
2549 # Mixed mode randomly performs gets and sets with eviction.
2549 # Mixed mode randomly performs gets and sets with eviction.
2550 mixedops = []
2550 mixedops = []
2551 for i in _xrange(mixed):
2551 for i in _xrange(mixed):
2552 r = random.randint(0, 100)
2552 r = random.randint(0, 100)
2553 if r < mixedgetfreq:
2553 if r < mixedgetfreq:
2554 op = 0
2554 op = 0
2555 else:
2555 else:
2556 op = 1
2556 op = 1
2557
2557
2558 mixedops.append((op,
2558 mixedops.append((op,
2559 random.randint(0, size * 2),
2559 random.randint(0, size * 2),
2560 random.choice(costrange)))
2560 random.choice(costrange)))
2561
2561
2562 def domixed():
2562 def domixed():
2563 d = util.lrucachedict(size)
2563 d = util.lrucachedict(size)
2564
2564
2565 for op, v, cost in mixedops:
2565 for op, v, cost in mixedops:
2566 if op == 0:
2566 if op == 0:
2567 try:
2567 try:
2568 d[v]
2568 d[v]
2569 except KeyError:
2569 except KeyError:
2570 pass
2570 pass
2571 else:
2571 else:
2572 d[v] = v
2572 d[v] = v
2573
2573
2574 def domixedcost():
2574 def domixedcost():
2575 d = util.lrucachedict(size, maxcost=costlimit)
2575 d = util.lrucachedict(size, maxcost=costlimit)
2576
2576
2577 for op, v, cost in mixedops:
2577 for op, v, cost in mixedops:
2578 if op == 0:
2578 if op == 0:
2579 try:
2579 try:
2580 d[v]
2580 d[v]
2581 except KeyError:
2581 except KeyError:
2582 pass
2582 pass
2583 else:
2583 else:
2584 d.insert(v, v, cost=cost)
2584 d.insert(v, v, cost=cost)
2585
2585
2586 benches = [
2586 benches = [
2587 (doinit, b'init'),
2587 (doinit, b'init'),
2588 ]
2588 ]
2589
2589
2590 if costlimit:
2590 if costlimit:
2591 benches.extend([
2591 benches.extend([
2592 (dogetscost, b'gets w/ cost limit'),
2592 (dogetscost, b'gets w/ cost limit'),
2593 (doinsertscost, b'inserts w/ cost limit'),
2593 (doinsertscost, b'inserts w/ cost limit'),
2594 (domixedcost, b'mixed w/ cost limit'),
2594 (domixedcost, b'mixed w/ cost limit'),
2595 ])
2595 ])
2596 else:
2596 else:
2597 benches.extend([
2597 benches.extend([
2598 (dogets, b'gets'),
2598 (dogets, b'gets'),
2599 (doinserts, b'inserts'),
2599 (doinserts, b'inserts'),
2600 (dosets, b'sets'),
2600 (dosets, b'sets'),
2601 (domixed, b'mixed')
2601 (domixed, b'mixed')
2602 ])
2602 ])
2603
2603
2604 for fn, title in benches:
2604 for fn, title in benches:
2605 timer, fm = gettimer(ui, opts)
2605 timer, fm = gettimer(ui, opts)
2606 timer(fn, title=title)
2606 timer(fn, title=title)
2607 fm.end()
2607 fm.end()
2608
2608
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def write():
        # Emit the same line many times; ui.write dominates the cost.
        for _ in range(100000):
            ui.write(b'Testing write performance\n')

    timer(write)
    fm.end()
2621
2621
def uisetup(ui):
    # for "historical portability":
    # Mercurial 1.9 (or a79fea6b3e77) through 3.7 (or 5606f7d0d063)
    # have cmdutil.openrevlog() but no commands.debugrevlogopts.  On
    # those versions the '--dir' option should cause failure, because
    # it has only been available since 3.5 (or 49c583ca48c4).
    if not util.safehasattr(cmdutil, b'openrevlog'):
        return
    if util.safehasattr(commands, b'debugrevlogopts'):
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        # Reject --dir explicitly rather than silently ignoring it.
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(b"This version doesn't support --dir option",
                              hint=b"use 3.5 or later")
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2636
2636
@command(b'perfprogress', formatteropts + [
    (b'', b'topic', b'topic', b'topic for progress messages'),
    (b'c', b'total', 1000000, b'total value we are progressing to'),
], norepo=True)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def doprogress():
        # Drive a progress bar through `total` increments; the context
        # manager handles completion/cleanup.
        with ui.makeprogress(topic, total=total) as progress:
            for _ in pycompat.xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now