perf: move some of the perftags benchmark to the setup function...
Boris Feld
r40718:4369c00a default
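The change below splits perftags into a setup callable s() that rebuilds the changelog/manifestlog and clears the tags cache, and a timed callable t() that only runs len(repo.tags()); the timer then receives the setup callable separately (timer(t, setup=s)). A minimal standalone sketch of that timing pattern, assuming a plain time.perf_counter loop and hypothetical reset_caches/compute_tags stand-ins rather than perf.py's _timer and a real repository:

    import time

    def time_with_setup(work, setup=None, runs=5):
        # Same idea as perf.py's _timer(func, setup=...): run setup() before
        # every iteration, but measure only the call to work().
        timings = []
        for _ in range(runs):
            if setup is not None:
                setup()                              # excluded from the measurement
            start = time.perf_counter()
            work()                                   # the only code being timed
            timings.append(time.perf_counter() - start)
        return min(timings)

    # Hypothetical stand-ins for the benchmark's setup and workload.
    cache = {}
    def reset_caches():
        cache.clear()                                # analogous to repocleartagscache()
    def compute_tags():
        return len(cache)                            # analogous to len(repo.tags())

    print(time_with_setup(compute_tags, setup=reset_caches))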
@@ -1,2369 +1,2370 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 # "historical portability" policy of perf.py:
4 # "historical portability" policy of perf.py:
5 #
5 #
6 # We have to do:
6 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
10 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
11 # version as possible
12 #
12 #
13 # We have to do, if possible with reasonable cost:
13 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
14 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
15 # with early Mercurial
16 #
16 #
17 # We don't have to do:
17 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
18 # - make perf command for recent feature work correctly with early
19 # Mercurial
19 # Mercurial
20
20
21 from __future__ import absolute_import
21 from __future__ import absolute_import
22 import contextlib
22 import contextlib
23 import functools
23 import functools
24 import gc
24 import gc
25 import os
25 import os
26 import random
26 import random
27 import shutil
27 import shutil
28 import struct
28 import struct
29 import sys
29 import sys
30 import tempfile
30 import tempfile
31 import threading
31 import threading
32 import time
32 import time
33 from mercurial import (
33 from mercurial import (
34 changegroup,
34 changegroup,
35 cmdutil,
35 cmdutil,
36 commands,
36 commands,
37 copies,
37 copies,
38 error,
38 error,
39 extensions,
39 extensions,
40 mdiff,
40 mdiff,
41 merge,
41 merge,
42 revlog,
42 revlog,
43 util,
43 util,
44 )
44 )
45
45
46 # for "historical portability":
46 # for "historical portability":
47 # try to import modules separately (in dict order), and ignore
47 # try to import modules separately (in dict order), and ignore
48 # failure, because these aren't available with early Mercurial
48 # failure, because these aren't available with early Mercurial
49 try:
49 try:
50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 except ImportError:
51 except ImportError:
52 pass
52 pass
53 try:
53 try:
54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 except ImportError:
55 except ImportError:
56 pass
56 pass
57 try:
57 try:
58 from mercurial import registrar # since 3.7 (or 37d50250b696)
58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 dir(registrar) # forcibly load it
59 dir(registrar) # forcibly load it
60 except ImportError:
60 except ImportError:
61 registrar = None
61 registrar = None
62 try:
62 try:
63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 except ImportError:
64 except ImportError:
65 pass
65 pass
66 try:
66 try:
67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 except ImportError:
68 except ImportError:
69 pass
69 pass
70
70
71 def identity(a):
71 def identity(a):
72 return a
72 return a
73
73
74 try:
74 try:
75 from mercurial import pycompat
75 from mercurial import pycompat
76 getargspec = pycompat.getargspec # added to module after 4.5
76 getargspec = pycompat.getargspec # added to module after 4.5
77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 if pycompat.ispy3:
81 if pycompat.ispy3:
82 _maxint = sys.maxsize # per py3 docs for replacing maxint
82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 else:
83 else:
84 _maxint = sys.maxint
84 _maxint = sys.maxint
85 except (ImportError, AttributeError):
85 except (ImportError, AttributeError):
86 import inspect
86 import inspect
87 getargspec = inspect.getargspec
87 getargspec = inspect.getargspec
88 _byteskwargs = identity
88 _byteskwargs = identity
89 fsencode = identity # no py3 support
89 fsencode = identity # no py3 support
90 _maxint = sys.maxint # no py3 support
90 _maxint = sys.maxint # no py3 support
91 _sysstr = lambda x: x # no py3 support
91 _sysstr = lambda x: x # no py3 support
92 _xrange = xrange
92 _xrange = xrange
93
93
94 try:
94 try:
95 # 4.7+
95 # 4.7+
96 queue = pycompat.queue.Queue
96 queue = pycompat.queue.Queue
97 except (AttributeError, ImportError):
97 except (AttributeError, ImportError):
98 # <4.7.
98 # <4.7.
99 try:
99 try:
100 queue = pycompat.queue
100 queue = pycompat.queue
101 except (AttributeError, ImportError):
101 except (AttributeError, ImportError):
102 queue = util.queue
102 queue = util.queue
103
103
104 try:
104 try:
105 from mercurial import logcmdutil
105 from mercurial import logcmdutil
106 makelogtemplater = logcmdutil.maketemplater
106 makelogtemplater = logcmdutil.maketemplater
107 except (AttributeError, ImportError):
107 except (AttributeError, ImportError):
108 try:
108 try:
109 makelogtemplater = cmdutil.makelogtemplater
109 makelogtemplater = cmdutil.makelogtemplater
110 except (AttributeError, ImportError):
110 except (AttributeError, ImportError):
111 makelogtemplater = None
111 makelogtemplater = None
112
112
113 # for "historical portability":
113 # for "historical portability":
114 # define util.safehasattr forcibly, because util.safehasattr has been
114 # define util.safehasattr forcibly, because util.safehasattr has been
115 # available since 1.9.3 (or 94b200a11cf7)
115 # available since 1.9.3 (or 94b200a11cf7)
116 _undefined = object()
116 _undefined = object()
117 def safehasattr(thing, attr):
117 def safehasattr(thing, attr):
118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
119 setattr(util, 'safehasattr', safehasattr)
119 setattr(util, 'safehasattr', safehasattr)
120
120
121 # for "historical portability":
121 # for "historical portability":
122 # define util.timer forcibly, because util.timer has been available
122 # define util.timer forcibly, because util.timer has been available
123 # since ae5d60bb70c9
123 # since ae5d60bb70c9
124 if safehasattr(time, 'perf_counter'):
124 if safehasattr(time, 'perf_counter'):
125 util.timer = time.perf_counter
125 util.timer = time.perf_counter
126 elif os.name == b'nt':
126 elif os.name == b'nt':
127 util.timer = time.clock
127 util.timer = time.clock
128 else:
128 else:
129 util.timer = time.time
129 util.timer = time.time
130
130
131 # for "historical portability":
131 # for "historical portability":
132 # use locally defined empty option list, if formatteropts isn't
132 # use locally defined empty option list, if formatteropts isn't
133 # available, because commands.formatteropts has been available since
133 # available, because commands.formatteropts has been available since
134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
135 # available since 2.2 (or ae5f92e154d3)
135 # available since 2.2 (or ae5f92e154d3)
136 formatteropts = getattr(cmdutil, "formatteropts",
136 formatteropts = getattr(cmdutil, "formatteropts",
137 getattr(commands, "formatteropts", []))
137 getattr(commands, "formatteropts", []))
138
138
139 # for "historical portability":
139 # for "historical portability":
140 # use locally defined option list, if debugrevlogopts isn't available,
140 # use locally defined option list, if debugrevlogopts isn't available,
141 # because commands.debugrevlogopts has been available since 3.7 (or
141 # because commands.debugrevlogopts has been available since 3.7 (or
142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
143 # since 1.9 (or a79fea6b3e77).
143 # since 1.9 (or a79fea6b3e77).
144 revlogopts = getattr(cmdutil, "debugrevlogopts",
144 revlogopts = getattr(cmdutil, "debugrevlogopts",
145 getattr(commands, "debugrevlogopts", [
145 getattr(commands, "debugrevlogopts", [
146 (b'c', b'changelog', False, (b'open changelog')),
146 (b'c', b'changelog', False, (b'open changelog')),
147 (b'm', b'manifest', False, (b'open manifest')),
147 (b'm', b'manifest', False, (b'open manifest')),
148 (b'', b'dir', False, (b'open directory manifest')),
148 (b'', b'dir', False, (b'open directory manifest')),
149 ]))
149 ]))
150
150
151 cmdtable = {}
151 cmdtable = {}
152
152
153 # for "historical portability":
153 # for "historical portability":
154 # define parsealiases locally, because cmdutil.parsealiases has been
154 # define parsealiases locally, because cmdutil.parsealiases has been
155 # available since 1.5 (or 6252852b4332)
155 # available since 1.5 (or 6252852b4332)
156 def parsealiases(cmd):
156 def parsealiases(cmd):
157 return cmd.split(b"|")
157 return cmd.split(b"|")
158
158
159 if safehasattr(registrar, 'command'):
159 if safehasattr(registrar, 'command'):
160 command = registrar.command(cmdtable)
160 command = registrar.command(cmdtable)
161 elif safehasattr(cmdutil, 'command'):
161 elif safehasattr(cmdutil, 'command'):
162 command = cmdutil.command(cmdtable)
162 command = cmdutil.command(cmdtable)
163 if b'norepo' not in getargspec(command).args:
163 if b'norepo' not in getargspec(command).args:
164 # for "historical portability":
164 # for "historical portability":
165 # wrap original cmdutil.command, because "norepo" option has
165 # wrap original cmdutil.command, because "norepo" option has
166 # been available since 3.1 (or 75a96326cecb)
166 # been available since 3.1 (or 75a96326cecb)
167 _command = command
167 _command = command
168 def command(name, options=(), synopsis=None, norepo=False):
168 def command(name, options=(), synopsis=None, norepo=False):
169 if norepo:
169 if norepo:
170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
171 return _command(name, list(options), synopsis)
171 return _command(name, list(options), synopsis)
172 else:
172 else:
173 # for "historical portability":
173 # for "historical portability":
174 # define "@command" annotation locally, because cmdutil.command
174 # define "@command" annotation locally, because cmdutil.command
175 # has been available since 1.9 (or 2daa5179e73f)
175 # has been available since 1.9 (or 2daa5179e73f)
176 def command(name, options=(), synopsis=None, norepo=False):
176 def command(name, options=(), synopsis=None, norepo=False):
177 def decorator(func):
177 def decorator(func):
178 if synopsis:
178 if synopsis:
179 cmdtable[name] = func, list(options), synopsis
179 cmdtable[name] = func, list(options), synopsis
180 else:
180 else:
181 cmdtable[name] = func, list(options)
181 cmdtable[name] = func, list(options)
182 if norepo:
182 if norepo:
183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
184 return func
184 return func
185 return decorator
185 return decorator
186
186
187 try:
187 try:
188 import mercurial.registrar
188 import mercurial.registrar
189 import mercurial.configitems
189 import mercurial.configitems
190 configtable = {}
190 configtable = {}
191 configitem = mercurial.registrar.configitem(configtable)
191 configitem = mercurial.registrar.configitem(configtable)
192 configitem(b'perf', b'presleep',
192 configitem(b'perf', b'presleep',
193 default=mercurial.configitems.dynamicdefault,
193 default=mercurial.configitems.dynamicdefault,
194 )
194 )
195 configitem(b'perf', b'stub',
195 configitem(b'perf', b'stub',
196 default=mercurial.configitems.dynamicdefault,
196 default=mercurial.configitems.dynamicdefault,
197 )
197 )
198 configitem(b'perf', b'parentscount',
198 configitem(b'perf', b'parentscount',
199 default=mercurial.configitems.dynamicdefault,
199 default=mercurial.configitems.dynamicdefault,
200 )
200 )
201 configitem(b'perf', b'all-timing',
201 configitem(b'perf', b'all-timing',
202 default=mercurial.configitems.dynamicdefault,
202 default=mercurial.configitems.dynamicdefault,
203 )
203 )
204 except (ImportError, AttributeError):
204 except (ImportError, AttributeError):
205 pass
205 pass
206
206
207 def getlen(ui):
207 def getlen(ui):
208 if ui.configbool(b"perf", b"stub", False):
208 if ui.configbool(b"perf", b"stub", False):
209 return lambda x: 1
209 return lambda x: 1
210 return len
210 return len
211
211
212 def gettimer(ui, opts=None):
212 def gettimer(ui, opts=None):
213 """return a timer function and formatter: (timer, formatter)
213 """return a timer function and formatter: (timer, formatter)
214
214
215 This function exists to gather the creation of formatter in a single
215 This function exists to gather the creation of formatter in a single
216 place instead of duplicating it in all performance commands."""
216 place instead of duplicating it in all performance commands."""
217
217
218 # enforce an idle period before execution to counteract power management
218 # enforce an idle period before execution to counteract power management
219 # experimental config: perf.presleep
219 # experimental config: perf.presleep
220 time.sleep(getint(ui, b"perf", b"presleep", 1))
220 time.sleep(getint(ui, b"perf", b"presleep", 1))
221
221
222 if opts is None:
222 if opts is None:
223 opts = {}
223 opts = {}
224 # redirect all to stderr unless buffer api is in use
224 # redirect all to stderr unless buffer api is in use
225 if not ui._buffers:
225 if not ui._buffers:
226 ui = ui.copy()
226 ui = ui.copy()
227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
228 if uifout:
228 if uifout:
229 # for "historical portability":
229 # for "historical portability":
230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
231 uifout.set(ui.ferr)
231 uifout.set(ui.ferr)
232
232
233 # get a formatter
233 # get a formatter
234 uiformatter = getattr(ui, 'formatter', None)
234 uiformatter = getattr(ui, 'formatter', None)
235 if uiformatter:
235 if uiformatter:
236 fm = uiformatter(b'perf', opts)
236 fm = uiformatter(b'perf', opts)
237 else:
237 else:
238 # for "historical portability":
238 # for "historical portability":
239 # define formatter locally, because ui.formatter has been
239 # define formatter locally, because ui.formatter has been
240 # available since 2.2 (or ae5f92e154d3)
240 # available since 2.2 (or ae5f92e154d3)
241 from mercurial import node
241 from mercurial import node
242 class defaultformatter(object):
242 class defaultformatter(object):
243 """Minimized composition of baseformatter and plainformatter
243 """Minimized composition of baseformatter and plainformatter
244 """
244 """
245 def __init__(self, ui, topic, opts):
245 def __init__(self, ui, topic, opts):
246 self._ui = ui
246 self._ui = ui
247 if ui.debugflag:
247 if ui.debugflag:
248 self.hexfunc = node.hex
248 self.hexfunc = node.hex
249 else:
249 else:
250 self.hexfunc = node.short
250 self.hexfunc = node.short
251 def __nonzero__(self):
251 def __nonzero__(self):
252 return False
252 return False
253 __bool__ = __nonzero__
253 __bool__ = __nonzero__
254 def startitem(self):
254 def startitem(self):
255 pass
255 pass
256 def data(self, **data):
256 def data(self, **data):
257 pass
257 pass
258 def write(self, fields, deftext, *fielddata, **opts):
258 def write(self, fields, deftext, *fielddata, **opts):
259 self._ui.write(deftext % fielddata, **opts)
259 self._ui.write(deftext % fielddata, **opts)
260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
261 if cond:
261 if cond:
262 self._ui.write(deftext % fielddata, **opts)
262 self._ui.write(deftext % fielddata, **opts)
263 def plain(self, text, **opts):
263 def plain(self, text, **opts):
264 self._ui.write(text, **opts)
264 self._ui.write(text, **opts)
265 def end(self):
265 def end(self):
266 pass
266 pass
267 fm = defaultformatter(ui, b'perf', opts)
267 fm = defaultformatter(ui, b'perf', opts)
268
268
269 # stub function, runs code only once instead of in a loop
269 # stub function, runs code only once instead of in a loop
270 # experimental config: perf.stub
270 # experimental config: perf.stub
271 if ui.configbool(b"perf", b"stub", False):
271 if ui.configbool(b"perf", b"stub", False):
272 return functools.partial(stub_timer, fm), fm
272 return functools.partial(stub_timer, fm), fm
273
273
274 # experimental config: perf.all-timing
274 # experimental config: perf.all-timing
275 displayall = ui.configbool(b"perf", b"all-timing", False)
275 displayall = ui.configbool(b"perf", b"all-timing", False)
276 return functools.partial(_timer, fm, displayall=displayall), fm
276 return functools.partial(_timer, fm, displayall=displayall), fm
277
277
278 def stub_timer(fm, func, setup=None, title=None):
278 def stub_timer(fm, func, setup=None, title=None):
279 func()
279 func()
280
280
281 @contextlib.contextmanager
281 @contextlib.contextmanager
282 def timeone():
282 def timeone():
283 r = []
283 r = []
284 ostart = os.times()
284 ostart = os.times()
285 cstart = util.timer()
285 cstart = util.timer()
286 yield r
286 yield r
287 cstop = util.timer()
287 cstop = util.timer()
288 ostop = os.times()
288 ostop = os.times()
289 a, b = ostart, ostop
289 a, b = ostart, ostop
290 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
290 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
291
291
292 def _timer(fm, func, setup=None, title=None, displayall=False):
292 def _timer(fm, func, setup=None, title=None, displayall=False):
293 gc.collect()
293 gc.collect()
294 results = []
294 results = []
295 begin = util.timer()
295 begin = util.timer()
296 count = 0
296 count = 0
297 while True:
297 while True:
298 if setup is not None:
298 if setup is not None:
299 setup()
299 setup()
300 with timeone() as item:
300 with timeone() as item:
301 r = func()
301 r = func()
302 count += 1
302 count += 1
303 results.append(item[0])
303 results.append(item[0])
304 cstop = util.timer()
304 cstop = util.timer()
305 if cstop - begin > 3 and count >= 100:
305 if cstop - begin > 3 and count >= 100:
306 break
306 break
307 if cstop - begin > 10 and count >= 3:
307 if cstop - begin > 10 and count >= 3:
308 break
308 break
309
309
310 formatone(fm, results, title=title, result=r,
310 formatone(fm, results, title=title, result=r,
311 displayall=displayall)
311 displayall=displayall)
312
312
313 def formatone(fm, timings, title=None, result=None, displayall=False):
313 def formatone(fm, timings, title=None, result=None, displayall=False):
314
314
315 count = len(timings)
315 count = len(timings)
316
316
317 fm.startitem()
317 fm.startitem()
318
318
319 if title:
319 if title:
320 fm.write(b'title', b'! %s\n', title)
320 fm.write(b'title', b'! %s\n', title)
321 if result:
321 if result:
322 fm.write(b'result', b'! result: %s\n', result)
322 fm.write(b'result', b'! result: %s\n', result)
323 def display(role, entry):
323 def display(role, entry):
324 prefix = b''
324 prefix = b''
325 if role != b'best':
325 if role != b'best':
326 prefix = b'%s.' % role
326 prefix = b'%s.' % role
327 fm.plain(b'!')
327 fm.plain(b'!')
328 fm.write(prefix + b'wall', b' wall %f', entry[0])
328 fm.write(prefix + b'wall', b' wall %f', entry[0])
329 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
329 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
330 fm.write(prefix + b'user', b' user %f', entry[1])
330 fm.write(prefix + b'user', b' user %f', entry[1])
331 fm.write(prefix + b'sys', b' sys %f', entry[2])
331 fm.write(prefix + b'sys', b' sys %f', entry[2])
332 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
332 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
333 fm.plain(b'\n')
333 fm.plain(b'\n')
334 timings.sort()
334 timings.sort()
335 min_val = timings[0]
335 min_val = timings[0]
336 display(b'best', min_val)
336 display(b'best', min_val)
337 if displayall:
337 if displayall:
338 max_val = timings[-1]
338 max_val = timings[-1]
339 display(b'max', max_val)
339 display(b'max', max_val)
340 avg = tuple([sum(x) / count for x in zip(*timings)])
340 avg = tuple([sum(x) / count for x in zip(*timings)])
341 display(b'avg', avg)
341 display(b'avg', avg)
342 median = timings[len(timings) // 2]
342 median = timings[len(timings) // 2]
343 display(b'median', median)
343 display(b'median', median)
344
344
345 # utilities for historical portability
345 # utilities for historical portability
346
346
347 def getint(ui, section, name, default):
347 def getint(ui, section, name, default):
348 # for "historical portability":
348 # for "historical portability":
349 # ui.configint has been available since 1.9 (or fa2b596db182)
349 # ui.configint has been available since 1.9 (or fa2b596db182)
350 v = ui.config(section, name, None)
350 v = ui.config(section, name, None)
351 if v is None:
351 if v is None:
352 return default
352 return default
353 try:
353 try:
354 return int(v)
354 return int(v)
355 except ValueError:
355 except ValueError:
356 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
356 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
357 % (section, name, v))
357 % (section, name, v))
358
358
359 def safeattrsetter(obj, name, ignoremissing=False):
359 def safeattrsetter(obj, name, ignoremissing=False):
360 """Ensure that 'obj' has 'name' attribute before subsequent setattr
360 """Ensure that 'obj' has 'name' attribute before subsequent setattr
361
361
362 This function is aborted, if 'obj' doesn't have 'name' attribute
362 This function is aborted, if 'obj' doesn't have 'name' attribute
363 at runtime. This avoids overlooking removal of an attribute, which
363 at runtime. This avoids overlooking removal of an attribute, which
364 breaks assumption of performance measurement, in the future.
364 breaks assumption of performance measurement, in the future.
365
365
366 This function returns the object to (1) assign a new value, and
366 This function returns the object to (1) assign a new value, and
367 (2) restore an original value to the attribute.
367 (2) restore an original value to the attribute.
368
368
369 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
369 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
370 abortion, and this function returns None. This is useful to
370 abortion, and this function returns None. This is useful to
371 examine an attribute, which isn't ensured in all Mercurial
371 examine an attribute, which isn't ensured in all Mercurial
372 versions.
372 versions.
373 """
373 """
374 if not util.safehasattr(obj, name):
374 if not util.safehasattr(obj, name):
375 if ignoremissing:
375 if ignoremissing:
376 return None
376 return None
377 raise error.Abort((b"missing attribute %s of %s might break assumption"
377 raise error.Abort((b"missing attribute %s of %s might break assumption"
378 b" of performance measurement") % (name, obj))
378 b" of performance measurement") % (name, obj))
379
379
380 origvalue = getattr(obj, _sysstr(name))
380 origvalue = getattr(obj, _sysstr(name))
381 class attrutil(object):
381 class attrutil(object):
382 def set(self, newvalue):
382 def set(self, newvalue):
383 setattr(obj, _sysstr(name), newvalue)
383 setattr(obj, _sysstr(name), newvalue)
384 def restore(self):
384 def restore(self):
385 setattr(obj, _sysstr(name), origvalue)
385 setattr(obj, _sysstr(name), origvalue)
386
386
387 return attrutil()
387 return attrutil()
388
388
389 # utilities to examine each internal API changes
389 # utilities to examine each internal API changes
390
390
391 def getbranchmapsubsettable():
391 def getbranchmapsubsettable():
392 # for "historical portability":
392 # for "historical portability":
393 # subsettable is defined in:
393 # subsettable is defined in:
394 # - branchmap since 2.9 (or 175c6fd8cacc)
394 # - branchmap since 2.9 (or 175c6fd8cacc)
395 # - repoview since 2.5 (or 59a9f18d4587)
395 # - repoview since 2.5 (or 59a9f18d4587)
396 for mod in (branchmap, repoview):
396 for mod in (branchmap, repoview):
397 subsettable = getattr(mod, 'subsettable', None)
397 subsettable = getattr(mod, 'subsettable', None)
398 if subsettable:
398 if subsettable:
399 return subsettable
399 return subsettable
400
400
401 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
401 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
402 # branchmap and repoview modules exist, but subsettable attribute
402 # branchmap and repoview modules exist, but subsettable attribute
403 # doesn't)
403 # doesn't)
404 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
404 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
405 hint=b"use 2.5 or later")
405 hint=b"use 2.5 or later")
406
406
407 def getsvfs(repo):
407 def getsvfs(repo):
408 """Return appropriate object to access files under .hg/store
408 """Return appropriate object to access files under .hg/store
409 """
409 """
410 # for "historical portability":
410 # for "historical portability":
411 # repo.svfs has been available since 2.3 (or 7034365089bf)
411 # repo.svfs has been available since 2.3 (or 7034365089bf)
412 svfs = getattr(repo, 'svfs', None)
412 svfs = getattr(repo, 'svfs', None)
413 if svfs:
413 if svfs:
414 return svfs
414 return svfs
415 else:
415 else:
416 return getattr(repo, 'sopener')
416 return getattr(repo, 'sopener')
417
417
418 def getvfs(repo):
418 def getvfs(repo):
419 """Return appropriate object to access files under .hg
419 """Return appropriate object to access files under .hg
420 """
420 """
421 # for "historical portability":
421 # for "historical portability":
422 # repo.vfs has been available since 2.3 (or 7034365089bf)
422 # repo.vfs has been available since 2.3 (or 7034365089bf)
423 vfs = getattr(repo, 'vfs', None)
423 vfs = getattr(repo, 'vfs', None)
424 if vfs:
424 if vfs:
425 return vfs
425 return vfs
426 else:
426 else:
427 return getattr(repo, 'opener')
427 return getattr(repo, 'opener')
428
428
429 def repocleartagscachefunc(repo):
429 def repocleartagscachefunc(repo):
430 """Return the function to clear tags cache according to repo internal API
430 """Return the function to clear tags cache according to repo internal API
431 """
431 """
432 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
432 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
433 # in this case, setattr(repo, '_tagscache', None) or so isn't
433 # in this case, setattr(repo, '_tagscache', None) or so isn't
434 # correct way to clear tags cache, because existing code paths
434 # correct way to clear tags cache, because existing code paths
435 # expect _tagscache to be a structured object.
435 # expect _tagscache to be a structured object.
436 def clearcache():
436 def clearcache():
437 # _tagscache has been filteredpropertycache since 2.5 (or
437 # _tagscache has been filteredpropertycache since 2.5 (or
438 # 98c867ac1330), and delattr() can't work in such case
438 # 98c867ac1330), and delattr() can't work in such case
439 if b'_tagscache' in vars(repo):
439 if b'_tagscache' in vars(repo):
440 del repo.__dict__[b'_tagscache']
440 del repo.__dict__[b'_tagscache']
441 return clearcache
441 return clearcache
442
442
443 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
443 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
444 if repotags: # since 1.4 (or 5614a628d173)
444 if repotags: # since 1.4 (or 5614a628d173)
445 return lambda : repotags.set(None)
445 return lambda : repotags.set(None)
446
446
447 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
447 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
448 if repotagscache: # since 0.6 (or d7df759d0e97)
448 if repotagscache: # since 0.6 (or d7df759d0e97)
449 return lambda : repotagscache.set(None)
449 return lambda : repotagscache.set(None)
450
450
451 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
451 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
452 # this point, but it isn't so problematic, because:
452 # this point, but it isn't so problematic, because:
453 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
453 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
454 # in perftags() causes failure soon
454 # in perftags() causes failure soon
455 # - perf.py itself has been available since 1.1 (or eb240755386d)
455 # - perf.py itself has been available since 1.1 (or eb240755386d)
456 raise error.Abort((b"tags API of this hg command is unknown"))
456 raise error.Abort((b"tags API of this hg command is unknown"))
457
457
458 # utilities to clear cache
458 # utilities to clear cache
459
459
460 def clearfilecache(repo, attrname):
460 def clearfilecache(repo, attrname):
461 unfi = repo.unfiltered()
461 unfi = repo.unfiltered()
462 if attrname in vars(unfi):
462 if attrname in vars(unfi):
463 delattr(unfi, attrname)
463 delattr(unfi, attrname)
464 unfi._filecache.pop(attrname, None)
464 unfi._filecache.pop(attrname, None)
465
465
466 # perf commands
466 # perf commands
467
467
468 @command(b'perfwalk', formatteropts)
468 @command(b'perfwalk', formatteropts)
469 def perfwalk(ui, repo, *pats, **opts):
469 def perfwalk(ui, repo, *pats, **opts):
470 opts = _byteskwargs(opts)
470 opts = _byteskwargs(opts)
471 timer, fm = gettimer(ui, opts)
471 timer, fm = gettimer(ui, opts)
472 m = scmutil.match(repo[None], pats, {})
472 m = scmutil.match(repo[None], pats, {})
473 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
473 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
474 ignored=False))))
474 ignored=False))))
475 fm.end()
475 fm.end()
476
476
477 @command(b'perfannotate', formatteropts)
477 @command(b'perfannotate', formatteropts)
478 def perfannotate(ui, repo, f, **opts):
478 def perfannotate(ui, repo, f, **opts):
479 opts = _byteskwargs(opts)
479 opts = _byteskwargs(opts)
480 timer, fm = gettimer(ui, opts)
480 timer, fm = gettimer(ui, opts)
481 fc = repo[b'.'][f]
481 fc = repo[b'.'][f]
482 timer(lambda: len(fc.annotate(True)))
482 timer(lambda: len(fc.annotate(True)))
483 fm.end()
483 fm.end()
484
484
485 @command(b'perfstatus',
485 @command(b'perfstatus',
486 [(b'u', b'unknown', False,
486 [(b'u', b'unknown', False,
487 b'ask status to look for unknown files')] + formatteropts)
487 b'ask status to look for unknown files')] + formatteropts)
488 def perfstatus(ui, repo, **opts):
488 def perfstatus(ui, repo, **opts):
489 opts = _byteskwargs(opts)
489 opts = _byteskwargs(opts)
490 #m = match.always(repo.root, repo.getcwd())
490 #m = match.always(repo.root, repo.getcwd())
491 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
491 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
492 # False))))
492 # False))))
493 timer, fm = gettimer(ui, opts)
493 timer, fm = gettimer(ui, opts)
494 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
494 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
495 fm.end()
495 fm.end()
496
496
497 @command(b'perfaddremove', formatteropts)
497 @command(b'perfaddremove', formatteropts)
498 def perfaddremove(ui, repo, **opts):
498 def perfaddremove(ui, repo, **opts):
499 opts = _byteskwargs(opts)
499 opts = _byteskwargs(opts)
500 timer, fm = gettimer(ui, opts)
500 timer, fm = gettimer(ui, opts)
501 try:
501 try:
502 oldquiet = repo.ui.quiet
502 oldquiet = repo.ui.quiet
503 repo.ui.quiet = True
503 repo.ui.quiet = True
504 matcher = scmutil.match(repo[None])
504 matcher = scmutil.match(repo[None])
505 opts[b'dry_run'] = True
505 opts[b'dry_run'] = True
506 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
506 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
507 finally:
507 finally:
508 repo.ui.quiet = oldquiet
508 repo.ui.quiet = oldquiet
509 fm.end()
509 fm.end()
510
510
511 def clearcaches(cl):
511 def clearcaches(cl):
512 # behave somewhat consistently across internal API changes
512 # behave somewhat consistently across internal API changes
513 if util.safehasattr(cl, b'clearcaches'):
513 if util.safehasattr(cl, b'clearcaches'):
514 cl.clearcaches()
514 cl.clearcaches()
515 elif util.safehasattr(cl, b'_nodecache'):
515 elif util.safehasattr(cl, b'_nodecache'):
516 from mercurial.node import nullid, nullrev
516 from mercurial.node import nullid, nullrev
517 cl._nodecache = {nullid: nullrev}
517 cl._nodecache = {nullid: nullrev}
518 cl._nodepos = None
518 cl._nodepos = None
519
519
520 @command(b'perfheads', formatteropts)
520 @command(b'perfheads', formatteropts)
521 def perfheads(ui, repo, **opts):
521 def perfheads(ui, repo, **opts):
522 opts = _byteskwargs(opts)
522 opts = _byteskwargs(opts)
523 timer, fm = gettimer(ui, opts)
523 timer, fm = gettimer(ui, opts)
524 cl = repo.changelog
524 cl = repo.changelog
525 def d():
525 def d():
526 len(cl.headrevs())
526 len(cl.headrevs())
527 clearcaches(cl)
527 clearcaches(cl)
528 timer(d)
528 timer(d)
529 fm.end()
529 fm.end()
530
530
531 @command(b'perftags', formatteropts)
531 @command(b'perftags', formatteropts)
532 def perftags(ui, repo, **opts):
532 def perftags(ui, repo, **opts):
533 import mercurial.changelog
533 import mercurial.changelog
534 import mercurial.manifest
534 import mercurial.manifest
535
535
536 opts = _byteskwargs(opts)
536 opts = _byteskwargs(opts)
537 timer, fm = gettimer(ui, opts)
537 timer, fm = gettimer(ui, opts)
538 svfs = getsvfs(repo)
538 svfs = getsvfs(repo)
539 repocleartagscache = repocleartagscachefunc(repo)
539 repocleartagscache = repocleartagscachefunc(repo)
540 def t():
540 def s():
541 repo.changelog = mercurial.changelog.changelog(svfs)
541 repo.changelog = mercurial.changelog.changelog(svfs)
542 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
542 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
543 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
543 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
544 rootmanifest)
544 rootmanifest)
545 repocleartagscache()
545 repocleartagscache()
546 def t():
546 return len(repo.tags())
547 return len(repo.tags())
547 timer(t)
548 timer(t, setup=s)
548 fm.end()
549 fm.end()
549
550
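With this split, _timer (defined near the top of the file) calls s() before every iteration and times only t(), so the reported wall/comb/user/sys numbers cover len(repo.tags()) alone rather than the changelog/manifestlog reconstruction. Reusing the time_with_setup sketch from the note above the diff, the difference between the old and new shape of the benchmark looks roughly like this (the sleep and both function names are hypothetical stand-ins, not part of perf.py):

    import time

    def rebuild_caches():
        time.sleep(0.05)              # stands in for recreating changelog/manifestlog

    def read_tags():
        return 42                     # stands in for len(repo.tags())

    # Old shape: the rebuild was part of the timed callable.
    old = time_with_setup(lambda: (rebuild_caches(), read_tags()))
    # New shape: the rebuild runs as setup and is excluded from the timing.
    new = time_with_setup(read_tags, setup=rebuild_caches)
    print(old, new)                   # old includes the ~50ms rebuild, new does not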
550 @command(b'perfancestors', formatteropts)
551 @command(b'perfancestors', formatteropts)
551 def perfancestors(ui, repo, **opts):
552 def perfancestors(ui, repo, **opts):
552 opts = _byteskwargs(opts)
553 opts = _byteskwargs(opts)
553 timer, fm = gettimer(ui, opts)
554 timer, fm = gettimer(ui, opts)
554 heads = repo.changelog.headrevs()
555 heads = repo.changelog.headrevs()
555 def d():
556 def d():
556 for a in repo.changelog.ancestors(heads):
557 for a in repo.changelog.ancestors(heads):
557 pass
558 pass
558 timer(d)
559 timer(d)
559 fm.end()
560 fm.end()
560
561
561 @command(b'perfancestorset', formatteropts)
562 @command(b'perfancestorset', formatteropts)
562 def perfancestorset(ui, repo, revset, **opts):
563 def perfancestorset(ui, repo, revset, **opts):
563 opts = _byteskwargs(opts)
564 opts = _byteskwargs(opts)
564 timer, fm = gettimer(ui, opts)
565 timer, fm = gettimer(ui, opts)
565 revs = repo.revs(revset)
566 revs = repo.revs(revset)
566 heads = repo.changelog.headrevs()
567 heads = repo.changelog.headrevs()
567 def d():
568 def d():
568 s = repo.changelog.ancestors(heads)
569 s = repo.changelog.ancestors(heads)
569 for rev in revs:
570 for rev in revs:
570 rev in s
571 rev in s
571 timer(d)
572 timer(d)
572 fm.end()
573 fm.end()
573
574
574 @command(b'perfbookmarks', formatteropts)
575 @command(b'perfbookmarks', formatteropts)
575 def perfbookmarks(ui, repo, **opts):
576 def perfbookmarks(ui, repo, **opts):
576 """benchmark parsing bookmarks from disk to memory"""
577 """benchmark parsing bookmarks from disk to memory"""
577 opts = _byteskwargs(opts)
578 opts = _byteskwargs(opts)
578 timer, fm = gettimer(ui, opts)
579 timer, fm = gettimer(ui, opts)
579
580
580 def s():
581 def s():
581 clearfilecache(repo, b'_bookmarks')
582 clearfilecache(repo, b'_bookmarks')
582 def d():
583 def d():
583 repo._bookmarks
584 repo._bookmarks
584 timer(d, setup=s)
585 timer(d, setup=s)
585 fm.end()
586 fm.end()
586
587
587 @command(b'perfbundleread', formatteropts, b'BUNDLE')
588 @command(b'perfbundleread', formatteropts, b'BUNDLE')
588 def perfbundleread(ui, repo, bundlepath, **opts):
589 def perfbundleread(ui, repo, bundlepath, **opts):
589 """Benchmark reading of bundle files.
590 """Benchmark reading of bundle files.
590
591
591 This command is meant to isolate the I/O part of bundle reading as
592 This command is meant to isolate the I/O part of bundle reading as
592 much as possible.
593 much as possible.
593 """
594 """
594 from mercurial import (
595 from mercurial import (
595 bundle2,
596 bundle2,
596 exchange,
597 exchange,
597 streamclone,
598 streamclone,
598 )
599 )
599
600
600 opts = _byteskwargs(opts)
601 opts = _byteskwargs(opts)
601
602
602 def makebench(fn):
603 def makebench(fn):
603 def run():
604 def run():
604 with open(bundlepath, b'rb') as fh:
605 with open(bundlepath, b'rb') as fh:
605 bundle = exchange.readbundle(ui, fh, bundlepath)
606 bundle = exchange.readbundle(ui, fh, bundlepath)
606 fn(bundle)
607 fn(bundle)
607
608
608 return run
609 return run
609
610
610 def makereadnbytes(size):
611 def makereadnbytes(size):
611 def run():
612 def run():
612 with open(bundlepath, b'rb') as fh:
613 with open(bundlepath, b'rb') as fh:
613 bundle = exchange.readbundle(ui, fh, bundlepath)
614 bundle = exchange.readbundle(ui, fh, bundlepath)
614 while bundle.read(size):
615 while bundle.read(size):
615 pass
616 pass
616
617
617 return run
618 return run
618
619
619 def makestdioread(size):
620 def makestdioread(size):
620 def run():
621 def run():
621 with open(bundlepath, b'rb') as fh:
622 with open(bundlepath, b'rb') as fh:
622 while fh.read(size):
623 while fh.read(size):
623 pass
624 pass
624
625
625 return run
626 return run
626
627
627 # bundle1
628 # bundle1
628
629
629 def deltaiter(bundle):
630 def deltaiter(bundle):
630 for delta in bundle.deltaiter():
631 for delta in bundle.deltaiter():
631 pass
632 pass
632
633
633 def iterchunks(bundle):
634 def iterchunks(bundle):
634 for chunk in bundle.getchunks():
635 for chunk in bundle.getchunks():
635 pass
636 pass
636
637
637 # bundle2
638 # bundle2
638
639
639 def forwardchunks(bundle):
640 def forwardchunks(bundle):
640 for chunk in bundle._forwardchunks():
641 for chunk in bundle._forwardchunks():
641 pass
642 pass
642
643
643 def iterparts(bundle):
644 def iterparts(bundle):
644 for part in bundle.iterparts():
645 for part in bundle.iterparts():
645 pass
646 pass
646
647
647 def iterpartsseekable(bundle):
648 def iterpartsseekable(bundle):
648 for part in bundle.iterparts(seekable=True):
649 for part in bundle.iterparts(seekable=True):
649 pass
650 pass
650
651
651 def seek(bundle):
652 def seek(bundle):
652 for part in bundle.iterparts(seekable=True):
653 for part in bundle.iterparts(seekable=True):
653 part.seek(0, os.SEEK_END)
654 part.seek(0, os.SEEK_END)
654
655
655 def makepartreadnbytes(size):
656 def makepartreadnbytes(size):
656 def run():
657 def run():
657 with open(bundlepath, b'rb') as fh:
658 with open(bundlepath, b'rb') as fh:
658 bundle = exchange.readbundle(ui, fh, bundlepath)
659 bundle = exchange.readbundle(ui, fh, bundlepath)
659 for part in bundle.iterparts():
660 for part in bundle.iterparts():
660 while part.read(size):
661 while part.read(size):
661 pass
662 pass
662
663
663 return run
664 return run
664
665
665 benches = [
666 benches = [
666 (makestdioread(8192), b'read(8k)'),
667 (makestdioread(8192), b'read(8k)'),
667 (makestdioread(16384), b'read(16k)'),
668 (makestdioread(16384), b'read(16k)'),
668 (makestdioread(32768), b'read(32k)'),
669 (makestdioread(32768), b'read(32k)'),
669 (makestdioread(131072), b'read(128k)'),
670 (makestdioread(131072), b'read(128k)'),
670 ]
671 ]
671
672
672 with open(bundlepath, b'rb') as fh:
673 with open(bundlepath, b'rb') as fh:
673 bundle = exchange.readbundle(ui, fh, bundlepath)
674 bundle = exchange.readbundle(ui, fh, bundlepath)
674
675
675 if isinstance(bundle, changegroup.cg1unpacker):
676 if isinstance(bundle, changegroup.cg1unpacker):
676 benches.extend([
677 benches.extend([
677 (makebench(deltaiter), b'cg1 deltaiter()'),
678 (makebench(deltaiter), b'cg1 deltaiter()'),
678 (makebench(iterchunks), b'cg1 getchunks()'),
679 (makebench(iterchunks), b'cg1 getchunks()'),
679 (makereadnbytes(8192), b'cg1 read(8k)'),
680 (makereadnbytes(8192), b'cg1 read(8k)'),
680 (makereadnbytes(16384), b'cg1 read(16k)'),
681 (makereadnbytes(16384), b'cg1 read(16k)'),
681 (makereadnbytes(32768), b'cg1 read(32k)'),
682 (makereadnbytes(32768), b'cg1 read(32k)'),
682 (makereadnbytes(131072), b'cg1 read(128k)'),
683 (makereadnbytes(131072), b'cg1 read(128k)'),
683 ])
684 ])
684 elif isinstance(bundle, bundle2.unbundle20):
685 elif isinstance(bundle, bundle2.unbundle20):
685 benches.extend([
686 benches.extend([
686 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
687 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
687 (makebench(iterparts), b'bundle2 iterparts()'),
688 (makebench(iterparts), b'bundle2 iterparts()'),
688 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
689 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
689 (makebench(seek), b'bundle2 part seek()'),
690 (makebench(seek), b'bundle2 part seek()'),
690 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
691 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
691 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
692 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
692 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
693 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
693 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
694 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
694 ])
695 ])
695 elif isinstance(bundle, streamclone.streamcloneapplier):
696 elif isinstance(bundle, streamclone.streamcloneapplier):
696 raise error.Abort(b'stream clone bundles not supported')
697 raise error.Abort(b'stream clone bundles not supported')
697 else:
698 else:
698 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
699 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
699
700
700 for fn, title in benches:
701 for fn, title in benches:
701 timer, fm = gettimer(ui, opts)
702 timer, fm = gettimer(ui, opts)
702 timer(fn, title=title)
703 timer(fn, title=title)
703 fm.end()
704 fm.end()
704
705
705 @command(b'perfchangegroupchangelog', formatteropts +
706 @command(b'perfchangegroupchangelog', formatteropts +
706 [(b'', b'version', b'02', b'changegroup version'),
707 [(b'', b'version', b'02', b'changegroup version'),
707 (b'r', b'rev', b'', b'revisions to add to changegroup')])
708 (b'r', b'rev', b'', b'revisions to add to changegroup')])
708 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
709 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
709 """Benchmark producing a changelog group for a changegroup.
710 """Benchmark producing a changelog group for a changegroup.
710
711
711 This measures the time spent processing the changelog during a
712 This measures the time spent processing the changelog during a
712 bundle operation. This occurs during `hg bundle` and on a server
713 bundle operation. This occurs during `hg bundle` and on a server
713 processing a `getbundle` wire protocol request (handles clones
714 processing a `getbundle` wire protocol request (handles clones
714 and pull requests).
715 and pull requests).
715
716
716 By default, all revisions are added to the changegroup.
717 By default, all revisions are added to the changegroup.
717 """
718 """
718 opts = _byteskwargs(opts)
719 opts = _byteskwargs(opts)
719 cl = repo.changelog
720 cl = repo.changelog
720 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
721 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
721 bundler = changegroup.getbundler(version, repo)
722 bundler = changegroup.getbundler(version, repo)
722
723
723 def d():
724 def d():
724 state, chunks = bundler._generatechangelog(cl, nodes)
725 state, chunks = bundler._generatechangelog(cl, nodes)
725 for chunk in chunks:
726 for chunk in chunks:
726 pass
727 pass
727
728
728 timer, fm = gettimer(ui, opts)
729 timer, fm = gettimer(ui, opts)
729
730
730 # Terminal printing can interfere with timing. So disable it.
731 # Terminal printing can interfere with timing. So disable it.
731 with ui.configoverride({(b'progress', b'disable'): True}):
732 with ui.configoverride({(b'progress', b'disable'): True}):
732 timer(d)
733 timer(d)
733
734
734 fm.end()
735 fm.end()
735
736
736 @command(b'perfdirs', formatteropts)
737 @command(b'perfdirs', formatteropts)
737 def perfdirs(ui, repo, **opts):
738 def perfdirs(ui, repo, **opts):
738 opts = _byteskwargs(opts)
739 opts = _byteskwargs(opts)
739 timer, fm = gettimer(ui, opts)
740 timer, fm = gettimer(ui, opts)
740 dirstate = repo.dirstate
741 dirstate = repo.dirstate
741 b'a' in dirstate
742 b'a' in dirstate
742 def d():
743 def d():
743 dirstate.hasdir(b'a')
744 dirstate.hasdir(b'a')
744 del dirstate._map._dirs
745 del dirstate._map._dirs
745 timer(d)
746 timer(d)
746 fm.end()
747 fm.end()
747
748
748 @command(b'perfdirstate', formatteropts)
749 @command(b'perfdirstate', formatteropts)
749 def perfdirstate(ui, repo, **opts):
750 def perfdirstate(ui, repo, **opts):
750 opts = _byteskwargs(opts)
751 opts = _byteskwargs(opts)
751 timer, fm = gettimer(ui, opts)
752 timer, fm = gettimer(ui, opts)
752 b"a" in repo.dirstate
753 b"a" in repo.dirstate
753 def d():
754 def d():
754 repo.dirstate.invalidate()
755 repo.dirstate.invalidate()
755 b"a" in repo.dirstate
756 b"a" in repo.dirstate
756 timer(d)
757 timer(d)
757 fm.end()
758 fm.end()
758
759
759 @command(b'perfdirstatedirs', formatteropts)
760 @command(b'perfdirstatedirs', formatteropts)
760 def perfdirstatedirs(ui, repo, **opts):
761 def perfdirstatedirs(ui, repo, **opts):
761 opts = _byteskwargs(opts)
762 opts = _byteskwargs(opts)
762 timer, fm = gettimer(ui, opts)
763 timer, fm = gettimer(ui, opts)
763 b"a" in repo.dirstate
764 b"a" in repo.dirstate
764 def d():
765 def d():
765 repo.dirstate.hasdir(b"a")
766 repo.dirstate.hasdir(b"a")
766 del repo.dirstate._map._dirs
767 del repo.dirstate._map._dirs
767 timer(d)
768 timer(d)
768 fm.end()
769 fm.end()
769
770
770 @command(b'perfdirstatefoldmap', formatteropts)
771 @command(b'perfdirstatefoldmap', formatteropts)
771 def perfdirstatefoldmap(ui, repo, **opts):
772 def perfdirstatefoldmap(ui, repo, **opts):
772 opts = _byteskwargs(opts)
773 opts = _byteskwargs(opts)
773 timer, fm = gettimer(ui, opts)
774 timer, fm = gettimer(ui, opts)
774 dirstate = repo.dirstate
775 dirstate = repo.dirstate
775 b'a' in dirstate
776 b'a' in dirstate
776 def d():
777 def d():
777 dirstate._map.filefoldmap.get(b'a')
778 dirstate._map.filefoldmap.get(b'a')
778 del dirstate._map.filefoldmap
779 del dirstate._map.filefoldmap
779 timer(d)
780 timer(d)
780 fm.end()
781 fm.end()
781
782
782 @command(b'perfdirfoldmap', formatteropts)
783 @command(b'perfdirfoldmap', formatteropts)
783 def perfdirfoldmap(ui, repo, **opts):
784 def perfdirfoldmap(ui, repo, **opts):
784 opts = _byteskwargs(opts)
785 opts = _byteskwargs(opts)
785 timer, fm = gettimer(ui, opts)
786 timer, fm = gettimer(ui, opts)
786 dirstate = repo.dirstate
787 dirstate = repo.dirstate
787 b'a' in dirstate
788 b'a' in dirstate
788 def d():
789 def d():
789 dirstate._map.dirfoldmap.get(b'a')
790 dirstate._map.dirfoldmap.get(b'a')
790 del dirstate._map.dirfoldmap
791 del dirstate._map.dirfoldmap
791 del dirstate._map._dirs
792 del dirstate._map._dirs
792 timer(d)
793 timer(d)
793 fm.end()
794 fm.end()
794
795
795 @command(b'perfdirstatewrite', formatteropts)
796 @command(b'perfdirstatewrite', formatteropts)
796 def perfdirstatewrite(ui, repo, **opts):
797 def perfdirstatewrite(ui, repo, **opts):
797 opts = _byteskwargs(opts)
798 opts = _byteskwargs(opts)
798 timer, fm = gettimer(ui, opts)
799 timer, fm = gettimer(ui, opts)
799 ds = repo.dirstate
800 ds = repo.dirstate
800 b"a" in ds
801 b"a" in ds
801 def d():
802 def d():
802 ds._dirty = True
803 ds._dirty = True
803 ds.write(repo.currenttransaction())
804 ds.write(repo.currenttransaction())
804 timer(d)
805 timer(d)
805 fm.end()
806 fm.end()
806
807
807 @command(b'perfmergecalculate',
808 @command(b'perfmergecalculate',
808 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
809 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
809 def perfmergecalculate(ui, repo, rev, **opts):
810 def perfmergecalculate(ui, repo, rev, **opts):
810 opts = _byteskwargs(opts)
811 opts = _byteskwargs(opts)
811 timer, fm = gettimer(ui, opts)
812 timer, fm = gettimer(ui, opts)
812 wctx = repo[None]
813 wctx = repo[None]
813 rctx = scmutil.revsingle(repo, rev, rev)
814 rctx = scmutil.revsingle(repo, rev, rev)
814 ancestor = wctx.ancestor(rctx)
815 ancestor = wctx.ancestor(rctx)
815 # we don't want working dir files to be stat'd in the benchmark, so prime
816 # we don't want working dir files to be stat'd in the benchmark, so prime
816 # that cache
817 # that cache
817 wctx.dirty()
818 wctx.dirty()
818 def d():
819 def d():
819 # acceptremote is True because we don't want prompts in the middle of
820 # acceptremote is True because we don't want prompts in the middle of
820 # our benchmark
821 # our benchmark
821 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
822 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
822 acceptremote=True, followcopies=True)
823 acceptremote=True, followcopies=True)
823 timer(d)
824 timer(d)
824 fm.end()
825 fm.end()
825
826
826 @command(b'perfpathcopies', [], b"REV REV")
827 @command(b'perfpathcopies', [], b"REV REV")
827 def perfpathcopies(ui, repo, rev1, rev2, **opts):
828 def perfpathcopies(ui, repo, rev1, rev2, **opts):
828 opts = _byteskwargs(opts)
829 opts = _byteskwargs(opts)
829 timer, fm = gettimer(ui, opts)
830 timer, fm = gettimer(ui, opts)
830 ctx1 = scmutil.revsingle(repo, rev1, rev1)
831 ctx1 = scmutil.revsingle(repo, rev1, rev1)
831 ctx2 = scmutil.revsingle(repo, rev2, rev2)
832 ctx2 = scmutil.revsingle(repo, rev2, rev2)
832 def d():
833 def d():
833 copies.pathcopies(ctx1, ctx2)
834 copies.pathcopies(ctx1, ctx2)
834 timer(d)
835 timer(d)
835 fm.end()
836 fm.end()
836
837
837 @command(b'perfphases',
838 @command(b'perfphases',
838 [(b'', b'full', False, b'include file reading time too'),
839 [(b'', b'full', False, b'include file reading time too'),
839 ], b"")
840 ], b"")
840 def perfphases(ui, repo, **opts):
841 def perfphases(ui, repo, **opts):
841 """benchmark phasesets computation"""
842 """benchmark phasesets computation"""
842 opts = _byteskwargs(opts)
843 opts = _byteskwargs(opts)
843 timer, fm = gettimer(ui, opts)
844 timer, fm = gettimer(ui, opts)
844 _phases = repo._phasecache
845 _phases = repo._phasecache
845 full = opts.get(b'full')
846 full = opts.get(b'full')
846 def d():
847 def d():
847 phases = _phases
848 phases = _phases
848 if full:
849 if full:
849 clearfilecache(repo, b'_phasecache')
850 clearfilecache(repo, b'_phasecache')
850 phases = repo._phasecache
851 phases = repo._phasecache
851 phases.invalidate()
852 phases.invalidate()
852 phases.loadphaserevs(repo)
853 phases.loadphaserevs(repo)
853 timer(d)
854 timer(d)
854 fm.end()
855 fm.end()
855
856
856 @command(b'perfphasesremote',
857 @command(b'perfphasesremote',
857 [], b"[DEST]")
858 [], b"[DEST]")
858 def perfphasesremote(ui, repo, dest=None, **opts):
859 def perfphasesremote(ui, repo, dest=None, **opts):
859 """benchmark time needed to analyse phases of the remote server"""
860 """benchmark time needed to analyse phases of the remote server"""
860 from mercurial.node import (
861 from mercurial.node import (
861 bin,
862 bin,
862 )
863 )
863 from mercurial import (
864 from mercurial import (
864 exchange,
865 exchange,
865 hg,
866 hg,
866 phases,
867 phases,
867 )
868 )
868 opts = _byteskwargs(opts)
869 opts = _byteskwargs(opts)
869 timer, fm = gettimer(ui, opts)
870 timer, fm = gettimer(ui, opts)
870
871
871 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
872 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
872 if not path:
873 if not path:
873 raise error.Abort((b'default repository not configured!'),
874 raise error.Abort((b'default repository not configured!'),
874 hint=(b"see 'hg help config.paths'"))
875 hint=(b"see 'hg help config.paths'"))
875 dest = path.pushloc or path.loc
876 dest = path.pushloc or path.loc
876 branches = (path.branch, opts.get(b'branch') or [])
877 branches = (path.branch, opts.get(b'branch') or [])
877 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
878 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
878 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
879 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
879 other = hg.peer(repo, opts, dest)
880 other = hg.peer(repo, opts, dest)
880
881
881 # easier to perform discovery through the operation
882 # easier to perform discovery through the operation
882 op = exchange.pushoperation(repo, other)
883 op = exchange.pushoperation(repo, other)
883 exchange._pushdiscoverychangeset(op)
884 exchange._pushdiscoverychangeset(op)
884
885
885 remotesubset = op.fallbackheads
886 remotesubset = op.fallbackheads
886
887
887 with other.commandexecutor() as e:
888 with other.commandexecutor() as e:
888 remotephases = e.callcommand(b'listkeys',
889 remotephases = e.callcommand(b'listkeys',
889 {b'namespace': b'phases'}).result()
890 {b'namespace': b'phases'}).result()
890 del other
891 del other
891 publishing = remotephases.get(b'publishing', False)
892 publishing = remotephases.get(b'publishing', False)
892 if publishing:
893 if publishing:
893 ui.status((b'publishing: yes\n'))
894 ui.status((b'publishing: yes\n'))
894 else:
895 else:
895 ui.status((b'publishing: no\n'))
896 ui.status((b'publishing: no\n'))
896
897
897 nodemap = repo.changelog.nodemap
898 nodemap = repo.changelog.nodemap
898 nonpublishroots = 0
899 nonpublishroots = 0
899 for nhex, phase in remotephases.iteritems():
900 for nhex, phase in remotephases.iteritems():
900 if nhex == b'publishing': # ignore data related to publish option
901 if nhex == b'publishing': # ignore data related to publish option
901 continue
902 continue
902 node = bin(nhex)
903 node = bin(nhex)
903 if node in nodemap and int(phase):
904 if node in nodemap and int(phase):
904 nonpublishroots += 1
905 nonpublishroots += 1
905 ui.status((b'number of roots: %d\n') % len(remotephases))
906 ui.status((b'number of roots: %d\n') % len(remotephases))
906 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
907 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
907 def d():
908 def d():
908 phases.remotephasessummary(repo,
909 phases.remotephasessummary(repo,
909 remotesubset,
910 remotesubset,
910 remotephases)
911 remotephases)
911 timer(d)
912 timer(d)
912 fm.end()
913 fm.end()

@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
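# Illustrative invocations (hypothetical revisions):
#   hg perfmanifest tip                  # look up via a changeset revision
#   hg perfmanifest --manifest-rev 0     # treat REV as a manifest revlog rev
#   hg perfmanifest --clear-disk tip     # also drop on-disk manifest caches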

@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command(b'perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo[b"tip"].node()
    svfs = getsvfs(repo)
    def d():
        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
        cl.rev(n)
    timer(d)
    fm.end()

@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
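# The iteration count above is read from the experimental 'perf.parentscount'
# knob; an illustrative override:
#   hg perfparents --config perf.parentscount=5000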

@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
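# Illustrative invocation (runs without a repository; the edit stream is
# generated from the seeded RNG above):
#   hg perflinelogedits -n 10000 --max-hunk-lines 10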

@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
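# Illustrative invocation: walk the entire changelog backwards, forcing the
# changeset data (not just the index) to be read for every revision:
#   hg perfmoonwalk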

@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
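# Illustrative invocation (hypothetical revset and template):
#   hg perftemplating -r '-1000:' '{rev}:{node|short} {desc|firstline}\n'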

@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()

def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
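# Illustrative invocations (revision numbers are placeholders):
#   hg perfbdiff -c 1000 --count 100        # changelog texts
#   hg perfbdiff --alldata 1000             # manifest + filelogs of a changeset
#   hg perfbdiff -m 1000 --blocks --xdiff   # block computation via xdiff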

@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
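# Illustrative invocation (hypothetical revision), the unified-diff counterpart
# of perfbdiff above:
#   hg perfunidiff -c 1000 --count 100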

@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
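# Illustrative invocation: times `hg diff` of the working copy once for each
# whitespace-option combination listed above:
#   hg perfdiffwd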

@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
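# Illustrative invocations (the file name is a placeholder):
#   hg perfrevlogindex -c                # changelog index
#   hg perfrevlogindex -m                # manifest index
#   hg perfrevlogindex path/to/file.txt  # a single filelog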

@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
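# Illustrative invocations (the file name is a placeholder):
#   hg perfrevlogrevisions -c -d 100          # every 100th changelog revision
#   hg perfrevlogrevisions --reverse -m       # manifest, newest to oldest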

@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revision tested'),
          (b'', b'source', b'full', b'the kind of data fed into the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many floats will not be very precise, we ignore this
    # fact for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()
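# Illustrative invocations (revision numbers are placeholders):
#   hg perfrevlogwrite -m --startrev 10000                # re-add from full texts
#   hg perfrevlogwrite -m --source parent-1 --details     # deltas against p1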

class _faketr(object):
    def add(s, x, y, z=None):
        return None

def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit='revs', total=total)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        ui.progress(topic, total, unit='revs', total=total)
        ui.progress(topic, None, unit='revs', total=total)
    return timings

def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})

@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)

@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
1883 rl.compress(chunk)
1883 finally:
1884 finally:
1884 rl._compressor = oldcompressor
1885 rl._compressor = oldcompressor
1885
1886
1886 benches = [
1887 benches = [
1887 (lambda: doread(), b'read'),
1888 (lambda: doread(), b'read'),
1888 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1889 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1889 (lambda: doreadbatch(), b'read batch'),
1890 (lambda: doreadbatch(), b'read batch'),
1890 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1891 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1891 (lambda: dochunk(), b'chunk'),
1892 (lambda: dochunk(), b'chunk'),
1892 (lambda: dochunkbatch(), b'chunk batch'),
1893 (lambda: dochunkbatch(), b'chunk batch'),
1893 ]
1894 ]
1894
1895
1895 for engine in sorted(engines):
1896 for engine in sorted(engines):
1896 compressor = util.compengines[engine].revlogcompressor()
1897 compressor = util.compengines[engine].revlogcompressor()
1897 benches.append((functools.partial(docompress, compressor),
1898 benches.append((functools.partial(docompress, compressor),
1898 b'compress w/ %s' % engine))
1899 b'compress w/ %s' % engine))
1899
1900
1900 for fn, title in benches:
1901 for fn, title in benches:
1901 timer, fm = gettimer(ui, opts)
1902 timer, fm = gettimer(ui, opts)
1902 timer(fn, title=title)
1903 timer(fn, title=title)
1903 fm.end()
1904 fm.end()
1904
1905
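# Illustrative invocation of the command above -- a sketch, assuming this file
# is loaded as the 'perf' extension (typically contrib/perf.py; the exact path
# and the available engine names are assumptions, not part of this file):
#
#   hg perfrevlogchunks -m --engines zlib
#
# This times chunk reads and recompression of the manifest revlog using only
# the listed compression engine(s).
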
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()

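# Illustrative invocation -- a sketch, assuming the extension is enabled as
# above (the revision number is an arbitrary example):
#
#   hg perfrevlogrevision -c 1000
#
# This times each phase (deltachain, read, rawchunks, decompress, patch, hash)
# of reconstructing changelog revision 1000; pass FILE REV instead of -c or -m
# to benchmark a filelog revision.
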
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on revset execution. The volatile caches hold
    filtering and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()

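# Illustrative invocation -- a sketch, assuming the extension is enabled (the
# revset expression is an arbitrary example):
#
#   hg perfrevset --clear --contexts 'draft() and not obsolete()'
#
# --clear drops the volatile caches between runs, and --contexts additionally
# builds a changectx for every matched revision.
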
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()

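# Illustrative invocation -- a sketch, assuming the extension is enabled;
# 'obsolete' stands in for a set name from obsolete.cachefuncs, and the names
# actually available depend on the Mercurial version:
#
#   hg perfvolatilesets --clear-obsstore obsolete
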
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()

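# Illustrative invocation -- a sketch, assuming the extension is enabled;
# 'visible' and 'served' stand in for filter names from repoview.filtertable:
#
#   hg perfbranchmap --full visible served
#
# With --full the whole branch cache is dropped before each run instead of
# only the entry for the filter being measured.
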
@command(b'perfbranchmapload', [
         (b'f', b'filter', b'', b'Specify repoview filter'),
         (b'', b'list', False, b'List branchmap filter caches'),
         ] + formatteropts)
def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if filter:
        repo = repoview.repoview(repo, filter)
    else:
        repo = repo.unfiltered()
    # try once without timer, the filter may not be cached
    if branchmap.read(repo) is None:
        raise error.Abort(b'No branchmap cached for %s repo'
                          % (filter or b'unfiltered'))
    timer, fm = gettimer(ui, opts)
    timer(lambda: branchmap.read(repo) and None)
    fm.end()

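# Illustrative invocations -- a sketch, assuming the extension is enabled;
# 'served' stands in for any filter name reported by --list:
#
#   hg perfbranchmapload --list            # show which branchmap caches exist
#   hg perfbranchmapload --filter served   # time reading the 'served' cache
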
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

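# Illustrative invocation -- a sketch, assuming the extension is enabled (the
# command is norepo, so it runs outside a repository; values are arbitrary
# examples):
#
#   hg perflrucachedict --size 4 --gets 10000 --sets 10000 --costlimit 500
#
# With a non-zero --costlimit, the cost-aware get/insert/mixed benchmarks run
# instead of the plain ones.
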
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def write():
        for i in range(100000):
            ui.write((b'Testing write performance\n'))
    timer(write)
    fm.end()

def uisetup(ui):
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)