perf: add some documentation to perfindex...
Boris Feld
r41482:d65ba1ff default
@@ -1,2675 +1,2683 @@
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
3
4 # "historical portability" policy of perf.py:
5 #
6 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
12 #
13 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
16 #
17 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
19 # Mercurial
20
21 from __future__ import absolute_import
22 import contextlib
23 import functools
24 import gc
25 import os
26 import random
27 import shutil
28 import struct
29 import sys
30 import tempfile
31 import threading
32 import time
33 from mercurial import (
34 changegroup,
35 cmdutil,
36 commands,
37 copies,
38 error,
39 extensions,
40 hg,
41 mdiff,
42 merge,
43 revlog,
44 util,
45 )
46
47 # for "historical portability":
48 # try to import modules separately (in dict order), and ignore
49 # failure, because these aren't available with early Mercurial
50 try:
51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 except ImportError:
53 pass
54 try:
55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 except ImportError:
57 pass
58 try:
59 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 dir(registrar) # forcibly load it
61 except ImportError:
62 registrar = None
63 try:
64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 except ImportError:
66 pass
67 try:
68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 except ImportError:
70 pass
71 try:
72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 except ImportError:
74 pass
75
76
77 def identity(a):
78 return a
79
80 try:
81 from mercurial import pycompat
82 getargspec = pycompat.getargspec # added to module after 4.5
83 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
84 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
85 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
86 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
87 if pycompat.ispy3:
88 _maxint = sys.maxsize # per py3 docs for replacing maxint
89 else:
90 _maxint = sys.maxint
91 except (ImportError, AttributeError):
92 import inspect
93 getargspec = inspect.getargspec
94 _byteskwargs = identity
95 fsencode = identity # no py3 support
96 _maxint = sys.maxint # no py3 support
97 _sysstr = lambda x: x # no py3 support
98 _xrange = xrange
99
100 try:
101 # 4.7+
102 queue = pycompat.queue.Queue
103 except (AttributeError, ImportError):
104 # <4.7.
105 try:
106 queue = pycompat.queue
107 except (AttributeError, ImportError):
108 queue = util.queue
109
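The try/except ladders above are the extension's compatibility layer: import the preferred module or attribute first, then quietly fall back to an older location or a local stand-in when it is missing. A generic sketch of the same idea, not part of perf.py and with an illustrative helper name:

import importlib

def load_first(*candidates):
    # Return the first importable module among ``candidates``, or None.
    for name in candidates:
        try:
            return importlib.import_module(name)
        except ImportError:
            continue
    return None

# e.g. prefer the Python 3 stdlib name and fall back to the Python 2 one
queue_mod = load_first('queue', 'Queue')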
110 try:
111 from mercurial import logcmdutil
112 makelogtemplater = logcmdutil.maketemplater
113 except (AttributeError, ImportError):
114 try:
115 makelogtemplater = cmdutil.makelogtemplater
116 except (AttributeError, ImportError):
117 makelogtemplater = None
118
119 # for "historical portability":
120 # define util.safehasattr forcibly, because util.safehasattr has been
121 # available since 1.9.3 (or 94b200a11cf7)
122 _undefined = object()
123 def safehasattr(thing, attr):
124 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
125 setattr(util, 'safehasattr', safehasattr)
126
127 # for "historical portability":
128 # define util.timer forcibly, because util.timer has been available
129 # since ae5d60bb70c9
130 if safehasattr(time, 'perf_counter'):
131 util.timer = time.perf_counter
132 elif os.name == b'nt':
133 util.timer = time.clock
134 else:
135 util.timer = time.time
136
137 # for "historical portability":
138 # use locally defined empty option list, if formatteropts isn't
139 # available, because commands.formatteropts has been available since
140 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
141 # available since 2.2 (or ae5f92e154d3)
142 formatteropts = getattr(cmdutil, "formatteropts",
143 getattr(commands, "formatteropts", []))
144
145 # for "historical portability":
146 # use locally defined option list, if debugrevlogopts isn't available,
147 # because commands.debugrevlogopts has been available since 3.7 (or
148 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
149 # since 1.9 (or a79fea6b3e77).
150 revlogopts = getattr(cmdutil, "debugrevlogopts",
151 getattr(commands, "debugrevlogopts", [
152 (b'c', b'changelog', False, (b'open changelog')),
153 (b'm', b'manifest', False, (b'open manifest')),
154 (b'', b'dir', False, (b'open directory manifest')),
155 ]))
156
157 cmdtable = {}
158
159 # for "historical portability":
160 # define parsealiases locally, because cmdutil.parsealiases has been
161 # available since 1.5 (or 6252852b4332)
162 def parsealiases(cmd):
163 return cmd.split(b"|")
164
165 if safehasattr(registrar, 'command'):
166 command = registrar.command(cmdtable)
167 elif safehasattr(cmdutil, 'command'):
168 command = cmdutil.command(cmdtable)
169 if b'norepo' not in getargspec(command).args:
170 # for "historical portability":
171 # wrap original cmdutil.command, because "norepo" option has
172 # been available since 3.1 (or 75a96326cecb)
173 _command = command
174 def command(name, options=(), synopsis=None, norepo=False):
175 if norepo:
176 commands.norepo += b' %s' % b' '.join(parsealiases(name))
177 return _command(name, list(options), synopsis)
178 else:
179 # for "historical portability":
180 # define "@command" annotation locally, because cmdutil.command
181 # has been available since 1.9 (or 2daa5179e73f)
182 def command(name, options=(), synopsis=None, norepo=False):
183 def decorator(func):
184 if synopsis:
185 cmdtable[name] = func, list(options), synopsis
186 else:
187 cmdtable[name] = func, list(options)
188 if norepo:
189 commands.norepo += b' %s' % b' '.join(parsealiases(name))
190 return func
191 return decorator
192
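On versions where neither registrar.command nor a new-enough cmdutil.command exists, the locally defined decorator above does the registration by hand: it simply stores each function in cmdtable under the command name. A self-contained sketch of that mechanism with a made-up command, illustrative only:

cmdtable = {}

def command(name, options=(), synopsis=None):
    # decorator factory: record (func, options[, synopsis]) under ``name``
    def decorator(func):
        if synopsis:
            cmdtable[name] = func, list(options), synopsis
        else:
            cmdtable[name] = func, list(options)
        return func
    return decorator

@command(b'perfnoop', [], b'hg perfnoop')
def perfnoop(ui, repo):
    return 0

# cmdtable now maps b'perfnoop' to (perfnoop, [], b'hg perfnoop')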
193 try:
194 import mercurial.registrar
195 import mercurial.configitems
196 configtable = {}
197 configitem = mercurial.registrar.configitem(configtable)
198 configitem(b'perf', b'presleep',
199 default=mercurial.configitems.dynamicdefault,
200 )
201 configitem(b'perf', b'stub',
202 default=mercurial.configitems.dynamicdefault,
203 )
204 configitem(b'perf', b'parentscount',
205 default=mercurial.configitems.dynamicdefault,
206 )
207 configitem(b'perf', b'all-timing',
208 default=mercurial.configitems.dynamicdefault,
209 )
210 except (ImportError, AttributeError):
211 pass
212
213 def getlen(ui):
214 if ui.configbool(b"perf", b"stub", False):
215 return lambda x: 1
216 return len
217
218 def gettimer(ui, opts=None):
219 """return a timer function and formatter: (timer, formatter)
220
221 This function exists to gather the creation of formatter in a single
222 place instead of duplicating it in all performance commands."""
223
224 # enforce an idle period before execution to counteract power management
225 # experimental config: perf.presleep
226 time.sleep(getint(ui, b"perf", b"presleep", 1))
227
228 if opts is None:
229 opts = {}
230 # redirect all to stderr unless buffer api is in use
231 if not ui._buffers:
232 ui = ui.copy()
233 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
234 if uifout:
235 # for "historical portability":
236 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
237 uifout.set(ui.ferr)
238
239 # get a formatter
240 uiformatter = getattr(ui, 'formatter', None)
241 if uiformatter:
242 fm = uiformatter(b'perf', opts)
243 else:
244 # for "historical portability":
245 # define formatter locally, because ui.formatter has been
246 # available since 2.2 (or ae5f92e154d3)
247 from mercurial import node
248 class defaultformatter(object):
249 """Minimized composition of baseformatter and plainformatter
250 """
251 def __init__(self, ui, topic, opts):
252 self._ui = ui
253 if ui.debugflag:
254 self.hexfunc = node.hex
255 else:
256 self.hexfunc = node.short
257 def __nonzero__(self):
258 return False
259 __bool__ = __nonzero__
260 def startitem(self):
261 pass
262 def data(self, **data):
263 pass
264 def write(self, fields, deftext, *fielddata, **opts):
265 self._ui.write(deftext % fielddata, **opts)
266 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
267 if cond:
268 self._ui.write(deftext % fielddata, **opts)
269 def plain(self, text, **opts):
270 self._ui.write(text, **opts)
271 def end(self):
272 pass
273 fm = defaultformatter(ui, b'perf', opts)
274
275 # stub function, runs code only once instead of in a loop
276 # experimental config: perf.stub
277 if ui.configbool(b"perf", b"stub", False):
278 return functools.partial(stub_timer, fm), fm
279
280 # experimental config: perf.all-timing
281 displayall = ui.configbool(b"perf", b"all-timing", False)
282 return functools.partial(_timer, fm, displayall=displayall), fm
283
284 def stub_timer(fm, func, setup=None, title=None):
285 if setup is not None:
286 setup()
287 func()
288
289 @contextlib.contextmanager
290 def timeone():
291 r = []
292 ostart = os.times()
293 cstart = util.timer()
294 yield r
295 cstop = util.timer()
296 ostop = os.times()
297 a, b = ostart, ostop
298 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
299
300 def _timer(fm, func, setup=None, title=None, displayall=False):
301 gc.collect()
302 results = []
303 begin = util.timer()
304 count = 0
305 while True:
306 if setup is not None:
307 setup()
308 with timeone() as item:
309 r = func()
310 count += 1
311 results.append(item[0])
312 cstop = util.timer()
313 if cstop - begin > 3 and count >= 100:
314 break
315 if cstop - begin > 10 and count >= 3:
316 break
317
318 formatone(fm, results, title=title, result=r,
319 displayall=displayall)
320
321 def formatone(fm, timings, title=None, result=None, displayall=False):
322
323 count = len(timings)
324
325 fm.startitem()
326
327 if title:
328 fm.write(b'title', b'! %s\n', title)
329 if result:
330 fm.write(b'result', b'! result: %s\n', result)
331 def display(role, entry):
332 prefix = b''
333 if role != b'best':
334 prefix = b'%s.' % role
335 fm.plain(b'!')
336 fm.write(prefix + b'wall', b' wall %f', entry[0])
337 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
338 fm.write(prefix + b'user', b' user %f', entry[1])
339 fm.write(prefix + b'sys', b' sys %f', entry[2])
340 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
341 fm.plain(b'\n')
342 timings.sort()
343 min_val = timings[0]
344 display(b'best', min_val)
345 if displayall:
346 max_val = timings[-1]
347 display(b'max', max_val)
348 avg = tuple([sum(x) / count for x in zip(*timings)])
349 display(b'avg', avg)
350 median = timings[len(timings) // 2]
351 display(b'median', median)
352
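timeone() and _timer() above are the measurement core: every sample is wrapped in a context manager that records wall time (util.timer) plus user and system CPU time (os.times()), and sampling stops once enough runs or enough elapsed time has accumulated. A dependency-free sketch of that strategy, with illustrative names and the same stop conditions:

import contextlib
import os
import time

@contextlib.contextmanager
def timeone():
    r = []
    ostart, cstart = os.times(), time.perf_counter()
    yield r
    cstop, ostop = time.perf_counter(), os.times()
    # (wall, user cpu, system cpu) for this single run
    r.append((cstop - cstart, ostop[0] - ostart[0], ostop[1] - ostart[1]))

def sample(func):
    results, begin, count = [], time.perf_counter(), 0
    while True:
        with timeone() as item:
            func()
        count += 1
        results.append(item[0])
        elapsed = time.perf_counter() - begin
        if elapsed > 3 and count >= 100:   # plenty of samples collected
            break
        if elapsed > 10 and count >= 3:    # hard time budget reached
            break
    return sorted(results)[0]              # the "best" entry perf.py reports by default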
353 # utilities for historical portability
354
355 def getint(ui, section, name, default):
356 # for "historical portability":
357 # ui.configint has been available since 1.9 (or fa2b596db182)
358 v = ui.config(section, name, None)
359 if v is None:
360 return default
361 try:
362 return int(v)
363 except ValueError:
364 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
365 % (section, name, v))
366
367 def safeattrsetter(obj, name, ignoremissing=False):
368 """Ensure that 'obj' has 'name' attribute before subsequent setattr
369
370 This function is aborted, if 'obj' doesn't have 'name' attribute
371 at runtime. This avoids overlooking removal of an attribute, which
372 breaks assumption of performance measurement, in the future.
373
374 This function returns the object to (1) assign a new value, and
375 (2) restore an original value to the attribute.
376
377 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
378 abortion, and this function returns None. This is useful to
379 examine an attribute, which isn't ensured in all Mercurial
380 versions.
381 """
382 if not util.safehasattr(obj, name):
383 if ignoremissing:
384 return None
385 raise error.Abort((b"missing attribute %s of %s might break assumption"
386 b" of performance measurement") % (name, obj))
387
388 origvalue = getattr(obj, _sysstr(name))
389 class attrutil(object):
390 def set(self, newvalue):
391 setattr(obj, _sysstr(name), newvalue)
392 def restore(self):
393 setattr(obj, _sysstr(name), origvalue)
394
395 return attrutil()
396
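safeattrsetter() hands back a small handle whose set() and restore() methods swap an attribute value and later put the original back, aborting if the attribute is unexpectedly missing. A stand-alone sketch of the same set/restore idiom, with no Mercurial objects involved and illustrative names:

import io
import sys

def attrswapper(obj, name):
    if not hasattr(obj, name):
        raise AttributeError('refusing to fabricate missing attribute %r' % name)
    origvalue = getattr(obj, name)
    class handle(object):
        def set(self, newvalue):
            setattr(obj, name, newvalue)
        def restore(self):
            setattr(obj, name, origvalue)
    return handle()

# temporarily silence stderr while measuring, then put it back
h = attrswapper(sys, 'stderr')
h.set(io.StringIO())
try:
    pass  # measured work would run here
finally:
    h.restore()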
397 # utilities to examine each internal API changes
398
399 def getbranchmapsubsettable():
400 # for "historical portability":
401 # subsettable is defined in:
402 # - branchmap since 2.9 (or 175c6fd8cacc)
403 # - repoview since 2.5 (or 59a9f18d4587)
404 for mod in (branchmap, repoview):
405 subsettable = getattr(mod, 'subsettable', None)
406 if subsettable:
407 return subsettable
408
409 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
410 # branchmap and repoview modules exist, but subsettable attribute
411 # doesn't)
412 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
413 hint=b"use 2.5 or later")
414
415 def getsvfs(repo):
416 """Return appropriate object to access files under .hg/store
417 """
418 # for "historical portability":
419 # repo.svfs has been available since 2.3 (or 7034365089bf)
420 svfs = getattr(repo, 'svfs', None)
421 if svfs:
422 return svfs
423 else:
424 return getattr(repo, 'sopener')
425
426 def getvfs(repo):
427 """Return appropriate object to access files under .hg
428 """
429 # for "historical portability":
430 # repo.vfs has been available since 2.3 (or 7034365089bf)
431 vfs = getattr(repo, 'vfs', None)
432 if vfs:
433 return vfs
434 else:
435 return getattr(repo, 'opener')
436
437 def repocleartagscachefunc(repo):
438 """Return the function to clear tags cache according to repo internal API
439 """
440 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
441 # in this case, setattr(repo, '_tagscache', None) or so isn't
442 # correct way to clear tags cache, because existing code paths
443 # expect _tagscache to be a structured object.
444 def clearcache():
445 # _tagscache has been filteredpropertycache since 2.5 (or
446 # 98c867ac1330), and delattr() can't work in such case
447 if b'_tagscache' in vars(repo):
448 del repo.__dict__[b'_tagscache']
449 return clearcache
450
451 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
452 if repotags: # since 1.4 (or 5614a628d173)
453 return lambda : repotags.set(None)
454
455 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
456 if repotagscache: # since 0.6 (or d7df759d0e97)
457 return lambda : repotagscache.set(None)
458
459 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
460 # this point, but it isn't so problematic, because:
461 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
462 # in perftags() causes failure soon
463 # - perf.py itself has been available since 1.1 (or eb240755386d)
464 raise error.Abort((b"tags API of this hg command is unknown"))
465
466 # utilities to clear cache
467
468 def clearfilecache(obj, attrname):
469 unfiltered = getattr(obj, 'unfiltered', None)
470 if unfiltered is not None:
471 obj = obj.unfiltered()
472 if attrname in vars(obj):
473 delattr(obj, attrname)
474 obj._filecache.pop(attrname, None)
475
476 def clearchangelog(repo):
477 if repo is not repo.unfiltered():
478 object.__setattr__(repo, r'_clcachekey', None)
479 object.__setattr__(repo, r'_clcache', None)
480 clearfilecache(repo.unfiltered(), 'changelog')
481
482 # perf commands
483
484 @command(b'perfwalk', formatteropts)
485 def perfwalk(ui, repo, *pats, **opts):
486 opts = _byteskwargs(opts)
487 timer, fm = gettimer(ui, opts)
488 m = scmutil.match(repo[None], pats, {})
489 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
490 ignored=False))))
491 fm.end()
492
493 @command(b'perfannotate', formatteropts)
494 def perfannotate(ui, repo, f, **opts):
495 opts = _byteskwargs(opts)
496 timer, fm = gettimer(ui, opts)
497 fc = repo[b'.'][f]
498 timer(lambda: len(fc.annotate(True)))
499 fm.end()
500
501 @command(b'perfstatus',
502 [(b'u', b'unknown', False,
503 b'ask status to look for unknown files')] + formatteropts)
504 def perfstatus(ui, repo, **opts):
505 opts = _byteskwargs(opts)
506 #m = match.always(repo.root, repo.getcwd())
507 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
508 # False))))
509 timer, fm = gettimer(ui, opts)
510 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
511 fm.end()
512
513 @command(b'perfaddremove', formatteropts)
514 def perfaddremove(ui, repo, **opts):
515 opts = _byteskwargs(opts)
516 timer, fm = gettimer(ui, opts)
517 try:
518 oldquiet = repo.ui.quiet
519 repo.ui.quiet = True
520 matcher = scmutil.match(repo[None])
521 opts[b'dry_run'] = True
522 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
523 finally:
524 repo.ui.quiet = oldquiet
525 fm.end()
526
527 def clearcaches(cl):
528 # behave somewhat consistently across internal API changes
529 if util.safehasattr(cl, b'clearcaches'):
530 cl.clearcaches()
531 elif util.safehasattr(cl, b'_nodecache'):
532 from mercurial.node import nullid, nullrev
533 cl._nodecache = {nullid: nullrev}
534 cl._nodepos = None
535
536 @command(b'perfheads', formatteropts)
537 def perfheads(ui, repo, **opts):
538 """benchmark the computation of a changelog heads"""
539 opts = _byteskwargs(opts)
540 timer, fm = gettimer(ui, opts)
541 cl = repo.changelog
542 def s():
543 clearcaches(cl)
544 def d():
545 len(cl.headrevs())
546 timer(d, setup=s)
547 fm.end()
548
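perfheads shows the shape that most perf commands share: an untimed setup function that drops the relevant caches, a measured function, and a call to timer(d, setup=s) followed by fm.end(). A skeletal command in that shape, reusing the helpers defined above; the command name and the measured operation are made up for illustration:

@command(b'perfexample', formatteropts)
def perfexample(ui, repo, **opts):
    """benchmark an example changelog operation (illustrative only)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():                 # runs before every sample, excluded from timing
        clearcaches(cl)
    def d():                 # the operation being measured
        len(cl.heads())
    timer(d, setup=s)
    fm.end()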
549 @command(b'perftags', formatteropts+
550 [
551 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
552 ])
553 def perftags(ui, repo, **opts):
554 opts = _byteskwargs(opts)
555 timer, fm = gettimer(ui, opts)
556 repocleartagscache = repocleartagscachefunc(repo)
557 clearrevlogs = opts[b'clear_revlogs']
558 def s():
559 if clearrevlogs:
560 clearchangelog(repo)
561 clearfilecache(repo.unfiltered(), 'manifest')
562 repocleartagscache()
563 def t():
564 return len(repo.tags())
565 timer(t, setup=s)
566 fm.end()
567
568 @command(b'perfancestors', formatteropts)
569 def perfancestors(ui, repo, **opts):
570 opts = _byteskwargs(opts)
571 timer, fm = gettimer(ui, opts)
572 heads = repo.changelog.headrevs()
573 def d():
574 for a in repo.changelog.ancestors(heads):
575 pass
576 timer(d)
577 fm.end()
578
579 @command(b'perfancestorset', formatteropts)
580 def perfancestorset(ui, repo, revset, **opts):
581 opts = _byteskwargs(opts)
582 timer, fm = gettimer(ui, opts)
583 revs = repo.revs(revset)
584 heads = repo.changelog.headrevs()
585 def d():
586 s = repo.changelog.ancestors(heads)
587 for rev in revs:
588 rev in s
589 timer(d)
590 fm.end()
591
592 @command(b'perfdiscovery', formatteropts, b'PATH')
593 def perfdiscovery(ui, repo, path, **opts):
594 """benchmark discovery between local repo and the peer at given path
595 """
596 repos = [repo, None]
597 timer, fm = gettimer(ui, opts)
598 path = ui.expandpath(path)
599
600 def s():
601 repos[1] = hg.peer(ui, opts, path)
602 def d():
603 setdiscovery.findcommonheads(ui, *repos)
604 timer(d, setup=s)
605 fm.end()
606
607 @command(b'perfbookmarks', formatteropts +
608 [
609 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
610 ])
611 def perfbookmarks(ui, repo, **opts):
612 """benchmark parsing bookmarks from disk to memory"""
613 opts = _byteskwargs(opts)
614 timer, fm = gettimer(ui, opts)
615
616 clearrevlogs = opts[b'clear_revlogs']
617 def s():
618 if clearrevlogs:
619 clearchangelog(repo)
620 clearfilecache(repo, b'_bookmarks')
621 def d():
622 repo._bookmarks
623 timer(d, setup=s)
624 fm.end()
625
626 @command(b'perfbundleread', formatteropts, b'BUNDLE')
627 def perfbundleread(ui, repo, bundlepath, **opts):
628 """Benchmark reading of bundle files.
629
630 This command is meant to isolate the I/O part of bundle reading as
631 much as possible.
632 """
633 from mercurial import (
634 bundle2,
635 exchange,
636 streamclone,
637 )
638
639 opts = _byteskwargs(opts)
640
641 def makebench(fn):
642 def run():
643 with open(bundlepath, b'rb') as fh:
644 bundle = exchange.readbundle(ui, fh, bundlepath)
645 fn(bundle)
646
647 return run
648
649 def makereadnbytes(size):
650 def run():
651 with open(bundlepath, b'rb') as fh:
652 bundle = exchange.readbundle(ui, fh, bundlepath)
653 while bundle.read(size):
654 pass
655
656 return run
657
658 def makestdioread(size):
659 def run():
660 with open(bundlepath, b'rb') as fh:
661 while fh.read(size):
662 pass
663
664 return run
665
666 # bundle1
667
668 def deltaiter(bundle):
669 for delta in bundle.deltaiter():
670 pass
671
672 def iterchunks(bundle):
673 for chunk in bundle.getchunks():
674 pass
675
676 # bundle2
677
678 def forwardchunks(bundle):
679 for chunk in bundle._forwardchunks():
680 pass
681
682 def iterparts(bundle):
683 for part in bundle.iterparts():
684 pass
685
686 def iterpartsseekable(bundle):
687 for part in bundle.iterparts(seekable=True):
688 pass
689
690 def seek(bundle):
691 for part in bundle.iterparts(seekable=True):
692 part.seek(0, os.SEEK_END)
693
694 def makepartreadnbytes(size):
695 def run():
696 with open(bundlepath, b'rb') as fh:
697 bundle = exchange.readbundle(ui, fh, bundlepath)
698 for part in bundle.iterparts():
699 while part.read(size):
700 pass
701
702 return run
703
704 benches = [
705 (makestdioread(8192), b'read(8k)'),
706 (makestdioread(16384), b'read(16k)'),
707 (makestdioread(32768), b'read(32k)'),
708 (makestdioread(131072), b'read(128k)'),
709 ]
710
711 with open(bundlepath, b'rb') as fh:
712 bundle = exchange.readbundle(ui, fh, bundlepath)
713
714 if isinstance(bundle, changegroup.cg1unpacker):
715 benches.extend([
716 (makebench(deltaiter), b'cg1 deltaiter()'),
717 (makebench(iterchunks), b'cg1 getchunks()'),
718 (makereadnbytes(8192), b'cg1 read(8k)'),
719 (makereadnbytes(16384), b'cg1 read(16k)'),
720 (makereadnbytes(32768), b'cg1 read(32k)'),
721 (makereadnbytes(131072), b'cg1 read(128k)'),
722 ])
723 elif isinstance(bundle, bundle2.unbundle20):
724 benches.extend([
725 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
726 (makebench(iterparts), b'bundle2 iterparts()'),
727 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
728 (makebench(seek), b'bundle2 part seek()'),
729 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
730 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
731 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
732 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
733 ])
734 elif isinstance(bundle, streamclone.streamcloneapplier):
735 raise error.Abort(b'stream clone bundles not supported')
736 else:
737 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
738
739 for fn, title in benches:
740 timer, fm = gettimer(ui, opts)
741 timer(fn, title=title)
742 fm.end()
743
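perfbundleread builds its benchmark matrix from closure factories: each make*() helper captures a parameter such as the read size and returns a zero-argument callable, and the resulting (callable, title) pairs are then timed one after another. The same pattern reduced to plain file I/O, using the stdlib timeit module instead of perf.py's timer; function names here are illustrative only:

import timeit

def make_read(path, size):
    def run():
        with open(path, 'rb') as fh:
            while fh.read(size):
                pass
    return run

def bench_file(path):
    benches = [(make_read(path, size), 'read(%dk)' % (size // 1024))
               for size in (8192, 32768, 131072)]
    for fn, title in benches:
        best = min(timeit.repeat(fn, number=1, repeat=3))  # best of 3 runs
        print('%s: wall %.6f' % (title, best))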
744 @command(b'perfchangegroupchangelog', formatteropts +
744 @command(b'perfchangegroupchangelog', formatteropts +
745 [(b'', b'cgversion', b'02', b'changegroup version'),
745 [(b'', b'cgversion', b'02', b'changegroup version'),
746 (b'r', b'rev', b'', b'revisions to add to changegroup')])
746 (b'r', b'rev', b'', b'revisions to add to changegroup')])
747 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
747 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
748 """Benchmark producing a changelog group for a changegroup.
748 """Benchmark producing a changelog group for a changegroup.
749
749
750 This measures the time spent processing the changelog during a
750 This measures the time spent processing the changelog during a
751 bundle operation. This occurs during `hg bundle` and on a server
751 bundle operation. This occurs during `hg bundle` and on a server
752 processing a `getbundle` wire protocol request (handles clones
752 processing a `getbundle` wire protocol request (handles clones
753 and pull requests).
753 and pull requests).
754
754
755 By default, all revisions are added to the changegroup.
755 By default, all revisions are added to the changegroup.
756 """
756 """
757 opts = _byteskwargs(opts)
757 opts = _byteskwargs(opts)
758 cl = repo.changelog
758 cl = repo.changelog
759 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
759 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
760 bundler = changegroup.getbundler(cgversion, repo)
760 bundler = changegroup.getbundler(cgversion, repo)
761
761
762 def d():
762 def d():
763 state, chunks = bundler._generatechangelog(cl, nodes)
763 state, chunks = bundler._generatechangelog(cl, nodes)
764 for chunk in chunks:
764 for chunk in chunks:
765 pass
765 pass
766
766
767 timer, fm = gettimer(ui, opts)
767 timer, fm = gettimer(ui, opts)
768
768
769 # Terminal printing can interfere with timing. So disable it.
769 # Terminal printing can interfere with timing. So disable it.
770 with ui.configoverride({(b'progress', b'disable'): True}):
770 with ui.configoverride({(b'progress', b'disable'): True}):
771 timer(d)
771 timer(d)
772
772
773 fm.end()
773 fm.end()
774
774
775 @command(b'perfdirs', formatteropts)
775 @command(b'perfdirs', formatteropts)
776 def perfdirs(ui, repo, **opts):
776 def perfdirs(ui, repo, **opts):
777 opts = _byteskwargs(opts)
777 opts = _byteskwargs(opts)
778 timer, fm = gettimer(ui, opts)
778 timer, fm = gettimer(ui, opts)
779 dirstate = repo.dirstate
779 dirstate = repo.dirstate
780 b'a' in dirstate
780 b'a' in dirstate
781 def d():
781 def d():
782 dirstate.hasdir(b'a')
782 dirstate.hasdir(b'a')
783 del dirstate._map._dirs
783 del dirstate._map._dirs
784 timer(d)
784 timer(d)
785 fm.end()
785 fm.end()
786
786
787 @command(b'perfdirstate', formatteropts)
787 @command(b'perfdirstate', formatteropts)
788 def perfdirstate(ui, repo, **opts):
788 def perfdirstate(ui, repo, **opts):
789 opts = _byteskwargs(opts)
789 opts = _byteskwargs(opts)
790 timer, fm = gettimer(ui, opts)
790 timer, fm = gettimer(ui, opts)
791 b"a" in repo.dirstate
791 b"a" in repo.dirstate
792 def d():
792 def d():
793 repo.dirstate.invalidate()
793 repo.dirstate.invalidate()
794 b"a" in repo.dirstate
794 b"a" in repo.dirstate
795 timer(d)
795 timer(d)
796 fm.end()
796 fm.end()
797
797
798 @command(b'perfdirstatedirs', formatteropts)
798 @command(b'perfdirstatedirs', formatteropts)
799 def perfdirstatedirs(ui, repo, **opts):
799 def perfdirstatedirs(ui, repo, **opts):
800 opts = _byteskwargs(opts)
800 opts = _byteskwargs(opts)
801 timer, fm = gettimer(ui, opts)
801 timer, fm = gettimer(ui, opts)
802 b"a" in repo.dirstate
802 b"a" in repo.dirstate
803 def d():
803 def d():
804 repo.dirstate.hasdir(b"a")
804 repo.dirstate.hasdir(b"a")
805 del repo.dirstate._map._dirs
805 del repo.dirstate._map._dirs
806 timer(d)
806 timer(d)
807 fm.end()
807 fm.end()
808
808
809 @command(b'perfdirstatefoldmap', formatteropts)
809 @command(b'perfdirstatefoldmap', formatteropts)
810 def perfdirstatefoldmap(ui, repo, **opts):
810 def perfdirstatefoldmap(ui, repo, **opts):
811 opts = _byteskwargs(opts)
811 opts = _byteskwargs(opts)
812 timer, fm = gettimer(ui, opts)
812 timer, fm = gettimer(ui, opts)
813 dirstate = repo.dirstate
813 dirstate = repo.dirstate
814 b'a' in dirstate
814 b'a' in dirstate
815 def d():
815 def d():
816 dirstate._map.filefoldmap.get(b'a')
816 dirstate._map.filefoldmap.get(b'a')
817 del dirstate._map.filefoldmap
817 del dirstate._map.filefoldmap
818 timer(d)
818 timer(d)
819 fm.end()
819 fm.end()
820
820
821 @command(b'perfdirfoldmap', formatteropts)
821 @command(b'perfdirfoldmap', formatteropts)
822 def perfdirfoldmap(ui, repo, **opts):
822 def perfdirfoldmap(ui, repo, **opts):
823 opts = _byteskwargs(opts)
823 opts = _byteskwargs(opts)
824 timer, fm = gettimer(ui, opts)
824 timer, fm = gettimer(ui, opts)
825 dirstate = repo.dirstate
825 dirstate = repo.dirstate
826 b'a' in dirstate
826 b'a' in dirstate
827 def d():
827 def d():
828 dirstate._map.dirfoldmap.get(b'a')
828 dirstate._map.dirfoldmap.get(b'a')
829 del dirstate._map.dirfoldmap
829 del dirstate._map.dirfoldmap
830 del dirstate._map._dirs
830 del dirstate._map._dirs
831 timer(d)
831 timer(d)
832 fm.end()
832 fm.end()
833
833
834 @command(b'perfdirstatewrite', formatteropts)
834 @command(b'perfdirstatewrite', formatteropts)
835 def perfdirstatewrite(ui, repo, **opts):
835 def perfdirstatewrite(ui, repo, **opts):
836 opts = _byteskwargs(opts)
836 opts = _byteskwargs(opts)
837 timer, fm = gettimer(ui, opts)
837 timer, fm = gettimer(ui, opts)
838 ds = repo.dirstate
838 ds = repo.dirstate
839 b"a" in ds
839 b"a" in ds
840 def d():
840 def d():
841 ds._dirty = True
841 ds._dirty = True
842 ds.write(repo.currenttransaction())
842 ds.write(repo.currenttransaction())
843 timer(d)
843 timer(d)
844 fm.end()
844 fm.end()
845
845
846 @command(b'perfmergecalculate',
846 @command(b'perfmergecalculate',
847 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
847 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
848 def perfmergecalculate(ui, repo, rev, **opts):
848 def perfmergecalculate(ui, repo, rev, **opts):
849 opts = _byteskwargs(opts)
849 opts = _byteskwargs(opts)
850 timer, fm = gettimer(ui, opts)
850 timer, fm = gettimer(ui, opts)
851 wctx = repo[None]
851 wctx = repo[None]
852 rctx = scmutil.revsingle(repo, rev, rev)
852 rctx = scmutil.revsingle(repo, rev, rev)
853 ancestor = wctx.ancestor(rctx)
853 ancestor = wctx.ancestor(rctx)
854 # we don't want working dir files to be stat'd in the benchmark, so prime
854 # we don't want working dir files to be stat'd in the benchmark, so prime
855 # that cache
855 # that cache
856 wctx.dirty()
856 wctx.dirty()
857 def d():
857 def d():
858 # acceptremote is True because we don't want prompts in the middle of
858 # acceptremote is True because we don't want prompts in the middle of
859 # our benchmark
859 # our benchmark
860 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
860 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
861 acceptremote=True, followcopies=True)
861 acceptremote=True, followcopies=True)
862 timer(d)
862 timer(d)
863 fm.end()
863 fm.end()
864
864
865 @command(b'perfpathcopies', [], b"REV REV")
865 @command(b'perfpathcopies', [], b"REV REV")
866 def perfpathcopies(ui, repo, rev1, rev2, **opts):
866 def perfpathcopies(ui, repo, rev1, rev2, **opts):
867 """benchmark the copy tracing logic"""
867 """benchmark the copy tracing logic"""
868 opts = _byteskwargs(opts)
868 opts = _byteskwargs(opts)
869 timer, fm = gettimer(ui, opts)
869 timer, fm = gettimer(ui, opts)
870 ctx1 = scmutil.revsingle(repo, rev1, rev1)
870 ctx1 = scmutil.revsingle(repo, rev1, rev1)
871 ctx2 = scmutil.revsingle(repo, rev2, rev2)
871 ctx2 = scmutil.revsingle(repo, rev2, rev2)
872 def d():
872 def d():
873 copies.pathcopies(ctx1, ctx2)
873 copies.pathcopies(ctx1, ctx2)
874 timer(d)
874 timer(d)
875 fm.end()
875 fm.end()
876
876
877 @command(b'perfphases',
877 @command(b'perfphases',
878 [(b'', b'full', False, b'include file reading time too'),
878 [(b'', b'full', False, b'include file reading time too'),
879 ], b"")
879 ], b"")
880 def perfphases(ui, repo, **opts):
880 def perfphases(ui, repo, **opts):
881 """benchmark phasesets computation"""
881 """benchmark phasesets computation"""
882 opts = _byteskwargs(opts)
882 opts = _byteskwargs(opts)
883 timer, fm = gettimer(ui, opts)
883 timer, fm = gettimer(ui, opts)
884 _phases = repo._phasecache
884 _phases = repo._phasecache
885 full = opts.get(b'full')
885 full = opts.get(b'full')
886 def d():
886 def d():
887 phases = _phases
887 phases = _phases
888 if full:
888 if full:
889 clearfilecache(repo, b'_phasecache')
889 clearfilecache(repo, b'_phasecache')
890 phases = repo._phasecache
890 phases = repo._phasecache
891 phases.invalidate()
891 phases.invalidate()
892 phases.loadphaserevs(repo)
892 phases.loadphaserevs(repo)
893 timer(d)
893 timer(d)
894 fm.end()
894 fm.end()
895
895
896 @command(b'perfphasesremote',
896 @command(b'perfphasesremote',
897 [], b"[DEST]")
897 [], b"[DEST]")
898 def perfphasesremote(ui, repo, dest=None, **opts):
898 def perfphasesremote(ui, repo, dest=None, **opts):
899 """benchmark time needed to analyse phases of the remote server"""
899 """benchmark time needed to analyse phases of the remote server"""
900 from mercurial.node import (
900 from mercurial.node import (
901 bin,
901 bin,
902 )
902 )
903 from mercurial import (
903 from mercurial import (
904 exchange,
904 exchange,
905 hg,
905 hg,
906 phases,
906 phases,
907 )
907 )
908 opts = _byteskwargs(opts)
908 opts = _byteskwargs(opts)
909 timer, fm = gettimer(ui, opts)
909 timer, fm = gettimer(ui, opts)
910
910
911 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
911 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
912 if not path:
912 if not path:
913 raise error.Abort((b'default repository not configured!'),
913 raise error.Abort((b'default repository not configured!'),
914 hint=(b"see 'hg help config.paths'"))
914 hint=(b"see 'hg help config.paths'"))
915 dest = path.pushloc or path.loc
915 dest = path.pushloc or path.loc
916 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
916 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
917 other = hg.peer(repo, opts, dest)
917 other = hg.peer(repo, opts, dest)
918
918
919 # easier to perform discovery through the operation
919 # easier to perform discovery through the operation
920 op = exchange.pushoperation(repo, other)
920 op = exchange.pushoperation(repo, other)
921 exchange._pushdiscoverychangeset(op)
921 exchange._pushdiscoverychangeset(op)
922
922
923 remotesubset = op.fallbackheads
923 remotesubset = op.fallbackheads
924
924
925 with other.commandexecutor() as e:
925 with other.commandexecutor() as e:
926 remotephases = e.callcommand(b'listkeys',
926 remotephases = e.callcommand(b'listkeys',
927 {b'namespace': b'phases'}).result()
927 {b'namespace': b'phases'}).result()
928 del other
928 del other
929 publishing = remotephases.get(b'publishing', False)
929 publishing = remotephases.get(b'publishing', False)
930 if publishing:
930 if publishing:
931 ui.status((b'publishing: yes\n'))
931 ui.status((b'publishing: yes\n'))
932 else:
932 else:
933 ui.status((b'publishing: no\n'))
933 ui.status((b'publishing: no\n'))
934
934
935 nodemap = repo.changelog.nodemap
935 nodemap = repo.changelog.nodemap
936 nonpublishroots = 0
936 nonpublishroots = 0
937 for nhex, phase in remotephases.iteritems():
937 for nhex, phase in remotephases.iteritems():
938 if nhex == b'publishing': # ignore data related to publish option
938 if nhex == b'publishing': # ignore data related to publish option
939 continue
939 continue
940 node = bin(nhex)
940 node = bin(nhex)
941 if node in nodemap and int(phase):
941 if node in nodemap and int(phase):
942 nonpublishroots += 1
942 nonpublishroots += 1
943 ui.status((b'number of roots: %d\n') % len(remotephases))
943 ui.status((b'number of roots: %d\n') % len(remotephases))
944 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
944 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
945 def d():
945 def d():
946 phases.remotephasessummary(repo,
946 phases.remotephasessummary(repo,
947 remotesubset,
947 remotesubset,
948 remotephases)
948 remotephases)
949 timer(d)
949 timer(d)
950 fm.end()
950 fm.end()
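
# Example usage of perfphasesremote (a sketch; DEST is optional and falls back
# to the `default-push` or `default` path resolved above):
#
#   $ hg perfphasesremote
#   $ hg perfphasesremote ssh://hg.example.org/repo   # hypothetical URL
#
# Besides timing phases.remotephasessummary(), the command reports whether the
# remote is publishing and how many known non-public roots it advertises.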
951
951
952 @command(b'perfmanifest',[
952 @command(b'perfmanifest',[
953 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
953 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
954 (b'', b'clear-disk', False, b'clear on-disk caches too'),
954 (b'', b'clear-disk', False, b'clear on-disk caches too'),
955 ] + formatteropts, b'REV|NODE')
955 ] + formatteropts, b'REV|NODE')
956 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
956 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
957 """benchmark the time to read a manifest from disk and return a usable
957 """benchmark the time to read a manifest from disk and return a usable
958 dict-like object
958 dict-like object
959
959
960 Manifest caches are cleared before retrieval."""
960 Manifest caches are cleared before retrieval."""
961 opts = _byteskwargs(opts)
961 opts = _byteskwargs(opts)
962 timer, fm = gettimer(ui, opts)
962 timer, fm = gettimer(ui, opts)
963 if not manifest_rev:
963 if not manifest_rev:
964 ctx = scmutil.revsingle(repo, rev, rev)
964 ctx = scmutil.revsingle(repo, rev, rev)
965 t = ctx.manifestnode()
965 t = ctx.manifestnode()
966 else:
966 else:
967 from mercurial.node import bin
967 from mercurial.node import bin
968
968
969 if len(rev) == 40:
969 if len(rev) == 40:
970 t = bin(rev)
970 t = bin(rev)
971 else:
971 else:
972 try:
972 try:
973 rev = int(rev)
973 rev = int(rev)
974
974
975 if util.safehasattr(repo.manifestlog, b'getstorage'):
975 if util.safehasattr(repo.manifestlog, b'getstorage'):
976 t = repo.manifestlog.getstorage(b'').node(rev)
976 t = repo.manifestlog.getstorage(b'').node(rev)
977 else:
977 else:
978 t = repo.manifestlog._revlog.lookup(rev)
978 t = repo.manifestlog._revlog.lookup(rev)
979 except ValueError:
979 except ValueError:
980 raise error.Abort(b'manifest revision must be integer or full '
980 raise error.Abort(b'manifest revision must be integer or full '
981 b'node')
981 b'node')
982 def d():
982 def d():
983 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
983 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
984 repo.manifestlog[t].read()
984 repo.manifestlog[t].read()
985 timer(d)
985 timer(d)
986 fm.end()
986 fm.end()
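
# Example usage of perfmanifest (a sketch; revisions are placeholders):
#
#   $ hg perfmanifest tip
#   $ hg perfmanifest --clear-disk tip
#   $ hg perfmanifest -m 0    # treat the argument as a manifest revision
#
# As the docstring notes, the manifest caches are cleared before each read;
# --clear-disk additionally drops the persisted on-disk caches.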
987
987
988 @command(b'perfchangeset', formatteropts)
988 @command(b'perfchangeset', formatteropts)
989 def perfchangeset(ui, repo, rev, **opts):
989 def perfchangeset(ui, repo, rev, **opts):
990 opts = _byteskwargs(opts)
990 opts = _byteskwargs(opts)
991 timer, fm = gettimer(ui, opts)
991 timer, fm = gettimer(ui, opts)
992 n = scmutil.revsingle(repo, rev).node()
992 n = scmutil.revsingle(repo, rev).node()
993 def d():
993 def d():
994 repo.changelog.read(n)
994 repo.changelog.read(n)
995 #repo.changelog._cache = None
995 #repo.changelog._cache = None
996 timer(d)
996 timer(d)
997 fm.end()
997 fm.end()
998
998
999 @command(b'perfignore', formatteropts)
999 @command(b'perfignore', formatteropts)
1000 def perfignore(ui, repo, **opts):
1000 def perfignore(ui, repo, **opts):
1001 """benchmark operation related to computing ignore"""
1001 """benchmark operation related to computing ignore"""
1002 opts = _byteskwargs(opts)
1002 opts = _byteskwargs(opts)
1003 timer, fm = gettimer(ui, opts)
1003 timer, fm = gettimer(ui, opts)
1004 dirstate = repo.dirstate
1004 dirstate = repo.dirstate
1005
1005
1006 def setupone():
1006 def setupone():
1007 dirstate.invalidate()
1007 dirstate.invalidate()
1008 clearfilecache(dirstate, b'_ignore')
1008 clearfilecache(dirstate, b'_ignore')
1009
1009
1010 def runone():
1010 def runone():
1011 dirstate._ignore
1011 dirstate._ignore
1012
1012
1013 timer(runone, setup=setupone, title=b"load")
1013 timer(runone, setup=setupone, title=b"load")
1014 fm.end()
1014 fm.end()
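
# Example usage of perfignore (a sketch):
#
#   $ hg perfignore
#
# Note the setup/run split: `setupone` invalidates the dirstate and drops the
# cached `_ignore` matcher, so the timed `runone` only measures rebuilding the
# ignore matcher itself.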
1015
1015
1016 @command(b'perfindex', [
1016 @command(b'perfindex', [
1017 (b'', b'rev', b'', b'revision to be looked up (default tip)'),
1017 (b'', b'rev', b'', b'revision to be looked up (default tip)'),
1018 ] + formatteropts)
1018 ] + formatteropts)
1019 def perfindex(ui, repo, **opts):
1019 def perfindex(ui, repo, **opts):
1020 """benchmark index creation time followed by a lookup
1021
1022 The default is to look up `tip`. Depending on the index implementation,
1023 the revision looked up can matter. For example, an implementation
1024 scanning the index will have a faster lookup time for `--rev tip` than for
1025 `--rev 0`.
1026
1027 It is not currently possible to check for lookup of a missing node."""
1020 import mercurial.revlog
1028 import mercurial.revlog
1021 opts = _byteskwargs(opts)
1029 opts = _byteskwargs(opts)
1022 timer, fm = gettimer(ui, opts)
1030 timer, fm = gettimer(ui, opts)
1023 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1031 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1024 if opts[b'rev'] is None:
1032 if opts[b'rev'] is None:
1025 n = repo[b"tip"].node()
1033 n = repo[b"tip"].node()
1026 else:
1034 else:
1027 rev = scmutil.revsingle(repo, opts[b'rev'])
1035 rev = scmutil.revsingle(repo, opts[b'rev'])
1028 n = repo[rev].node()
1036 n = repo[rev].node()
1029
1037
1030 unfi = repo.unfiltered()
1038 unfi = repo.unfiltered()
1031 # find the filecache func directly
1039 # find the filecache func directly
1032 # This avoids polluting the benchmark with the filecache logic
1040 # This avoids polluting the benchmark with the filecache logic
1033 makecl = unfi.__class__.changelog.func
1041 makecl = unfi.__class__.changelog.func
1034 def setup():
1042 def setup():
1035 # probably not necessary, but for good measure
1043 # probably not necessary, but for good measure
1036 clearchangelog(unfi)
1044 clearchangelog(unfi)
1037 def d():
1045 def d():
1038 cl = makecl(unfi)
1046 cl = makecl(unfi)
1039 cl.rev(n)
1047 cl.rev(n)
1040 timer(d, setup=setup)
1048 timer(d, setup=setup)
1041 fm.end()
1049 fm.end()
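
# The docstring above notes that the revision looked up can matter. The toy
# helper below is a hypothetical illustration (it is not used by perfindex):
# with an implementation that scans the index, the cost of a lookup depends on
# how far into the scan the wanted node sits, whereas the C index answers the
# same question through its nodemap without scanning.
def _linearscanlookupdemo(nodes, wanted):
    """Toy lookup: return the revision number of ``wanted`` in ``nodes``.

    The number of comparisons grows with the position of ``wanted``, which is
    why `--rev 0` and `--rev tip` can time very differently on scanning
    implementations.
    """
    for rev, node in enumerate(nodes):
        if node == wanted:
            return rev
    return None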
1042
1050
1043 @command(b'perfstartup', formatteropts)
1051 @command(b'perfstartup', formatteropts)
1044 def perfstartup(ui, repo, **opts):
1052 def perfstartup(ui, repo, **opts):
1045 opts = _byteskwargs(opts)
1053 opts = _byteskwargs(opts)
1046 timer, fm = gettimer(ui, opts)
1054 timer, fm = gettimer(ui, opts)
1047 def d():
1055 def d():
1048 if os.name != r'nt':
1056 if os.name != r'nt':
1049 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1057 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1050 fsencode(sys.argv[0]))
1058 fsencode(sys.argv[0]))
1051 else:
1059 else:
1052 os.environ[r'HGRCPATH'] = r' '
1060 os.environ[r'HGRCPATH'] = r' '
1053 os.system(r"%s version -q > NUL" % sys.argv[0])
1061 os.system(r"%s version -q > NUL" % sys.argv[0])
1054 timer(d)
1062 timer(d)
1055 fm.end()
1063 fm.end()
1056
1064
1057 @command(b'perfparents', formatteropts)
1065 @command(b'perfparents', formatteropts)
1058 def perfparents(ui, repo, **opts):
1066 def perfparents(ui, repo, **opts):
1059 opts = _byteskwargs(opts)
1067 opts = _byteskwargs(opts)
1060 timer, fm = gettimer(ui, opts)
1068 timer, fm = gettimer(ui, opts)
1061 # control the number of commits perfparents iterates over
1069 # control the number of commits perfparents iterates over
1062 # experimental config: perf.parentscount
1070 # experimental config: perf.parentscount
1063 count = getint(ui, b"perf", b"parentscount", 1000)
1071 count = getint(ui, b"perf", b"parentscount", 1000)
1064 if len(repo.changelog) < count:
1072 if len(repo.changelog) < count:
1065 raise error.Abort(b"repo needs %d commits for this test" % count)
1073 raise error.Abort(b"repo needs %d commits for this test" % count)
1066 repo = repo.unfiltered()
1074 repo = repo.unfiltered()
1067 nl = [repo.changelog.node(i) for i in _xrange(count)]
1075 nl = [repo.changelog.node(i) for i in _xrange(count)]
1068 def d():
1076 def d():
1069 for n in nl:
1077 for n in nl:
1070 repo.changelog.parents(n)
1078 repo.changelog.parents(n)
1071 timer(d)
1079 timer(d)
1072 fm.end()
1080 fm.end()
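
# Example usage of perfparents (a sketch):
#
#   $ hg perfparents
#   $ hg perfparents --config perf.parentscount=100
#
# The second form lowers the experimental `perf.parentscount` knob read above,
# which is useful on repositories with fewer than the default 1000 commits
# (the command aborts otherwise).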
1073
1081
1074 @command(b'perfctxfiles', formatteropts)
1082 @command(b'perfctxfiles', formatteropts)
1075 def perfctxfiles(ui, repo, x, **opts):
1083 def perfctxfiles(ui, repo, x, **opts):
1076 opts = _byteskwargs(opts)
1084 opts = _byteskwargs(opts)
1077 x = int(x)
1085 x = int(x)
1078 timer, fm = gettimer(ui, opts)
1086 timer, fm = gettimer(ui, opts)
1079 def d():
1087 def d():
1080 len(repo[x].files())
1088 len(repo[x].files())
1081 timer(d)
1089 timer(d)
1082 fm.end()
1090 fm.end()
1083
1091
1084 @command(b'perfrawfiles', formatteropts)
1092 @command(b'perfrawfiles', formatteropts)
1085 def perfrawfiles(ui, repo, x, **opts):
1093 def perfrawfiles(ui, repo, x, **opts):
1086 opts = _byteskwargs(opts)
1094 opts = _byteskwargs(opts)
1087 x = int(x)
1095 x = int(x)
1088 timer, fm = gettimer(ui, opts)
1096 timer, fm = gettimer(ui, opts)
1089 cl = repo.changelog
1097 cl = repo.changelog
1090 def d():
1098 def d():
1091 len(cl.read(x)[3])
1099 len(cl.read(x)[3])
1092 timer(d)
1100 timer(d)
1093 fm.end()
1101 fm.end()
1094
1102
1095 @command(b'perflookup', formatteropts)
1103 @command(b'perflookup', formatteropts)
1096 def perflookup(ui, repo, rev, **opts):
1104 def perflookup(ui, repo, rev, **opts):
1097 opts = _byteskwargs(opts)
1105 opts = _byteskwargs(opts)
1098 timer, fm = gettimer(ui, opts)
1106 timer, fm = gettimer(ui, opts)
1099 timer(lambda: len(repo.lookup(rev)))
1107 timer(lambda: len(repo.lookup(rev)))
1100 fm.end()
1108 fm.end()
1101
1109
1102 @command(b'perflinelogedits',
1110 @command(b'perflinelogedits',
1103 [(b'n', b'edits', 10000, b'number of edits'),
1111 [(b'n', b'edits', 10000, b'number of edits'),
1104 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1112 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1105 ], norepo=True)
1113 ], norepo=True)
1106 def perflinelogedits(ui, **opts):
1114 def perflinelogedits(ui, **opts):
1107 from mercurial import linelog
1115 from mercurial import linelog
1108
1116
1109 opts = _byteskwargs(opts)
1117 opts = _byteskwargs(opts)
1110
1118
1111 edits = opts[b'edits']
1119 edits = opts[b'edits']
1112 maxhunklines = opts[b'max_hunk_lines']
1120 maxhunklines = opts[b'max_hunk_lines']
1113
1121
1114 maxb1 = 100000
1122 maxb1 = 100000
1115 random.seed(0)
1123 random.seed(0)
1116 randint = random.randint
1124 randint = random.randint
1117 currentlines = 0
1125 currentlines = 0
1118 arglist = []
1126 arglist = []
1119 for rev in _xrange(edits):
1127 for rev in _xrange(edits):
1120 a1 = randint(0, currentlines)
1128 a1 = randint(0, currentlines)
1121 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1129 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1122 b1 = randint(0, maxb1)
1130 b1 = randint(0, maxb1)
1123 b2 = randint(b1, b1 + maxhunklines)
1131 b2 = randint(b1, b1 + maxhunklines)
1124 currentlines += (b2 - b1) - (a2 - a1)
1132 currentlines += (b2 - b1) - (a2 - a1)
1125 arglist.append((rev, a1, a2, b1, b2))
1133 arglist.append((rev, a1, a2, b1, b2))
1126
1134
1127 def d():
1135 def d():
1128 ll = linelog.linelog()
1136 ll = linelog.linelog()
1129 for args in arglist:
1137 for args in arglist:
1130 ll.replacelines(*args)
1138 ll.replacelines(*args)
1131
1139
1132 timer, fm = gettimer(ui, opts)
1140 timer, fm = gettimer(ui, opts)
1133 timer(d)
1141 timer(d)
1134 fm.end()
1142 fm.end()
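
# Example usage of perflinelogedits (a sketch; the command is norepo, so it
# can run outside any repository):
#
#   $ hg perflinelogedits -n 20000 --max-hunk-lines 5
#
# The edit stream is generated from a fixed random seed, so identical options
# replay the same sequence of replacelines() calls.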
1135
1143
1136 @command(b'perfrevrange', formatteropts)
1144 @command(b'perfrevrange', formatteropts)
1137 def perfrevrange(ui, repo, *specs, **opts):
1145 def perfrevrange(ui, repo, *specs, **opts):
1138 opts = _byteskwargs(opts)
1146 opts = _byteskwargs(opts)
1139 timer, fm = gettimer(ui, opts)
1147 timer, fm = gettimer(ui, opts)
1140 revrange = scmutil.revrange
1148 revrange = scmutil.revrange
1141 timer(lambda: len(revrange(repo, specs)))
1149 timer(lambda: len(revrange(repo, specs)))
1142 fm.end()
1150 fm.end()
1143
1151
1144 @command(b'perfnodelookup', formatteropts)
1152 @command(b'perfnodelookup', formatteropts)
1145 def perfnodelookup(ui, repo, rev, **opts):
1153 def perfnodelookup(ui, repo, rev, **opts):
1146 opts = _byteskwargs(opts)
1154 opts = _byteskwargs(opts)
1147 timer, fm = gettimer(ui, opts)
1155 timer, fm = gettimer(ui, opts)
1148 import mercurial.revlog
1156 import mercurial.revlog
1149 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1157 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1150 n = scmutil.revsingle(repo, rev).node()
1158 n = scmutil.revsingle(repo, rev).node()
1151 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1159 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1152 def d():
1160 def d():
1153 cl.rev(n)
1161 cl.rev(n)
1154 clearcaches(cl)
1162 clearcaches(cl)
1155 timer(d)
1163 timer(d)
1156 fm.end()
1164 fm.end()
1157
1165
1158 @command(b'perflog',
1166 @command(b'perflog',
1159 [(b'', b'rename', False, b'ask log to follow renames')
1167 [(b'', b'rename', False, b'ask log to follow renames')
1160 ] + formatteropts)
1168 ] + formatteropts)
1161 def perflog(ui, repo, rev=None, **opts):
1169 def perflog(ui, repo, rev=None, **opts):
1162 opts = _byteskwargs(opts)
1170 opts = _byteskwargs(opts)
1163 if rev is None:
1171 if rev is None:
1164 rev=[]
1172 rev=[]
1165 timer, fm = gettimer(ui, opts)
1173 timer, fm = gettimer(ui, opts)
1166 ui.pushbuffer()
1174 ui.pushbuffer()
1167 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1175 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1168 copies=opts.get(b'rename')))
1176 copies=opts.get(b'rename')))
1169 ui.popbuffer()
1177 ui.popbuffer()
1170 fm.end()
1178 fm.end()
1171
1179
1172 @command(b'perfmoonwalk', formatteropts)
1180 @command(b'perfmoonwalk', formatteropts)
1173 def perfmoonwalk(ui, repo, **opts):
1181 def perfmoonwalk(ui, repo, **opts):
1174 """benchmark walking the changelog backwards
1182 """benchmark walking the changelog backwards
1175
1183
1176 This also loads the changelog data for each revision in the changelog.
1184 This also loads the changelog data for each revision in the changelog.
1177 """
1185 """
1178 opts = _byteskwargs(opts)
1186 opts = _byteskwargs(opts)
1179 timer, fm = gettimer(ui, opts)
1187 timer, fm = gettimer(ui, opts)
1180 def moonwalk():
1188 def moonwalk():
1181 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1189 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1182 ctx = repo[i]
1190 ctx = repo[i]
1183 ctx.branch() # read changelog data (in addition to the index)
1191 ctx.branch() # read changelog data (in addition to the index)
1184 timer(moonwalk)
1192 timer(moonwalk)
1185 fm.end()
1193 fm.end()
1186
1194
1187 @command(b'perftemplating',
1195 @command(b'perftemplating',
1188 [(b'r', b'rev', [], b'revisions to run the template on'),
1196 [(b'r', b'rev', [], b'revisions to run the template on'),
1189 ] + formatteropts)
1197 ] + formatteropts)
1190 def perftemplating(ui, repo, testedtemplate=None, **opts):
1198 def perftemplating(ui, repo, testedtemplate=None, **opts):
1191 """test the rendering time of a given template"""
1199 """test the rendering time of a given template"""
1192 if makelogtemplater is None:
1200 if makelogtemplater is None:
1193 raise error.Abort((b"perftemplating not available with this Mercurial"),
1201 raise error.Abort((b"perftemplating not available with this Mercurial"),
1194 hint=b"use 4.3 or later")
1202 hint=b"use 4.3 or later")
1195
1203
1196 opts = _byteskwargs(opts)
1204 opts = _byteskwargs(opts)
1197
1205
1198 nullui = ui.copy()
1206 nullui = ui.copy()
1199 nullui.fout = open(os.devnull, r'wb')
1207 nullui.fout = open(os.devnull, r'wb')
1200 nullui.disablepager()
1208 nullui.disablepager()
1201 revs = opts.get(b'rev')
1209 revs = opts.get(b'rev')
1202 if not revs:
1210 if not revs:
1203 revs = [b'all()']
1211 revs = [b'all()']
1204 revs = list(scmutil.revrange(repo, revs))
1212 revs = list(scmutil.revrange(repo, revs))
1205
1213
1206 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1214 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1207 b' {author|person}: {desc|firstline}\n')
1215 b' {author|person}: {desc|firstline}\n')
1208 if testedtemplate is None:
1216 if testedtemplate is None:
1209 testedtemplate = defaulttemplate
1217 testedtemplate = defaulttemplate
1210 displayer = makelogtemplater(nullui, repo, testedtemplate)
1218 displayer = makelogtemplater(nullui, repo, testedtemplate)
1211 def format():
1219 def format():
1212 for r in revs:
1220 for r in revs:
1213 ctx = repo[r]
1221 ctx = repo[r]
1214 displayer.show(ctx)
1222 displayer.show(ctx)
1215 displayer.flush(ctx)
1223 displayer.flush(ctx)
1216
1224
1217 timer, fm = gettimer(ui, opts)
1225 timer, fm = gettimer(ui, opts)
1218 timer(format)
1226 timer(format)
1219 fm.end()
1227 fm.end()
1220
1228
1221 @command(b'perfhelper-pathcopies', formatteropts +
1229 @command(b'perfhelper-pathcopies', formatteropts +
1222 [
1230 [
1223 (b'r', b'revs', [], b'restrict search to these revisions'),
1231 (b'r', b'revs', [], b'restrict search to these revisions'),
1224 (b'', b'timing', False, b'provides extra data (costly)'),
1232 (b'', b'timing', False, b'provides extra data (costly)'),
1225 ])
1233 ])
1226 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1234 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1227 """find statistic about potential parameters for the `perftracecopies`
1235 """find statistic about potential parameters for the `perftracecopies`
1228
1236
1229 This command finds source-destination pairs relevant for copy tracing testing.
1237 This command finds source-destination pairs relevant for copy tracing testing.
1230 It reports values for some of the parameters that impact copy tracing time.
1238 It reports values for some of the parameters that impact copy tracing time.
1231
1239
1232 If `--timing` is set, rename detection is run and the associated timing
1240 If `--timing` is set, rename detection is run and the associated timing
1233 will be reported. The extra details come at the cost of a slower command
1241 will be reported. The extra details come at the cost of a slower command
1234 execution.
1242 execution.
1235
1243
1236 Since the rename detection is only run once, other factors might easily
1244 Since the rename detection is only run once, other factors might easily
1237 affect the precision of the timing. However, it should give a good
1245 affect the precision of the timing. However, it should give a good
1238 approximation of which revision pairs are very costly.
1246 approximation of which revision pairs are very costly.
1239 """
1247 """
1240 opts = _byteskwargs(opts)
1248 opts = _byteskwargs(opts)
1241 fm = ui.formatter(b'perf', opts)
1249 fm = ui.formatter(b'perf', opts)
1242 dotiming = opts[b'timing']
1250 dotiming = opts[b'timing']
1243
1251
1244 if dotiming:
1252 if dotiming:
1245 header = '%12s %12s %12s %12s %12s %12s\n'
1253 header = '%12s %12s %12s %12s %12s %12s\n'
1246 output = ("%(source)12s %(destination)12s "
1254 output = ("%(source)12s %(destination)12s "
1247 "%(nbrevs)12d %(nbmissingfiles)12d "
1255 "%(nbrevs)12d %(nbmissingfiles)12d "
1248 "%(nbrenamedfiles)12d %(time)18.5f\n")
1256 "%(nbrenamedfiles)12d %(time)18.5f\n")
1249 header_names = ("source", "destination", "nb-revs", "nb-files",
1257 header_names = ("source", "destination", "nb-revs", "nb-files",
1250 "nb-renames", "time")
1258 "nb-renames", "time")
1251 fm.plain(header % header_names)
1259 fm.plain(header % header_names)
1252 else:
1260 else:
1253 header = '%12s %12s %12s %12s\n'
1261 header = '%12s %12s %12s %12s\n'
1254 output = ("%(source)12s %(destination)12s "
1262 output = ("%(source)12s %(destination)12s "
1255 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1263 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1256 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1264 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1257
1265
1258 if not revs:
1266 if not revs:
1259 revs = ['all()']
1267 revs = ['all()']
1260 revs = scmutil.revrange(repo, revs)
1268 revs = scmutil.revrange(repo, revs)
1261
1269
1262 roi = repo.revs('merge() and %ld', revs)
1270 roi = repo.revs('merge() and %ld', revs)
1263 for r in roi:
1271 for r in roi:
1264 ctx = repo[r]
1272 ctx = repo[r]
1265 p1 = ctx.p1().rev()
1273 p1 = ctx.p1().rev()
1266 p2 = ctx.p2().rev()
1274 p2 = ctx.p2().rev()
1267 bases = repo.changelog._commonancestorsheads(p1, p2)
1275 bases = repo.changelog._commonancestorsheads(p1, p2)
1268 for p in (p1, p2):
1276 for p in (p1, p2):
1269 for b in bases:
1277 for b in bases:
1270 base = repo[b]
1278 base = repo[b]
1271 parent = repo[p]
1279 parent = repo[p]
1272 missing = copies._computeforwardmissing(base, parent)
1280 missing = copies._computeforwardmissing(base, parent)
1273 if not missing:
1281 if not missing:
1274 continue
1282 continue
1275 data = {
1283 data = {
1276 b'source': base.hex(),
1284 b'source': base.hex(),
1277 b'destination': parent.hex(),
1285 b'destination': parent.hex(),
1278 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1286 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1279 b'nbmissingfiles': len(missing),
1287 b'nbmissingfiles': len(missing),
1280 }
1288 }
1281 if dotiming:
1289 if dotiming:
1282 begin = util.timer()
1290 begin = util.timer()
1283 renames = copies.pathcopies(base, parent)
1291 renames = copies.pathcopies(base, parent)
1284 end = util.timer()
1292 end = util.timer()
1285 # not very stable timing since we did only one run
1293 # not very stable timing since we did only one run
1286 data['time'] = end - begin
1294 data['time'] = end - begin
1287 data['nbrenamedfiles'] = len(renames)
1295 data['nbrenamedfiles'] = len(renames)
1288 fm.startitem()
1296 fm.startitem()
1289 fm.data(**data)
1297 fm.data(**data)
1290 out = data.copy()
1298 out = data.copy()
1291 out['source'] = fm.hexfunc(base.node())
1299 out['source'] = fm.hexfunc(base.node())
1292 out['destination'] = fm.hexfunc(parent.node())
1300 out['destination'] = fm.hexfunc(parent.node())
1293 fm.plain(output % out)
1301 fm.plain(output % out)
1294
1302
1295 fm.end()
1303 fm.end()
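
# Example usage of perfhelper-pathcopies (a sketch; the revset is only an
# illustration):
#
#   $ hg perfhelper-pathcopies -r 'draft()' --timing
#
# Without --timing the command only lists candidate source/destination pairs
# and their sizes; with it, copies.pathcopies() is run once per pair and the
# (single-run, hence noisy) duration is added to the table.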
1296
1304
1297 @command(b'perfcca', formatteropts)
1305 @command(b'perfcca', formatteropts)
1298 def perfcca(ui, repo, **opts):
1306 def perfcca(ui, repo, **opts):
1299 opts = _byteskwargs(opts)
1307 opts = _byteskwargs(opts)
1300 timer, fm = gettimer(ui, opts)
1308 timer, fm = gettimer(ui, opts)
1301 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1309 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1302 fm.end()
1310 fm.end()
1303
1311
1304 @command(b'perffncacheload', formatteropts)
1312 @command(b'perffncacheload', formatteropts)
1305 def perffncacheload(ui, repo, **opts):
1313 def perffncacheload(ui, repo, **opts):
1306 opts = _byteskwargs(opts)
1314 opts = _byteskwargs(opts)
1307 timer, fm = gettimer(ui, opts)
1315 timer, fm = gettimer(ui, opts)
1308 s = repo.store
1316 s = repo.store
1309 def d():
1317 def d():
1310 s.fncache._load()
1318 s.fncache._load()
1311 timer(d)
1319 timer(d)
1312 fm.end()
1320 fm.end()
1313
1321
1314 @command(b'perffncachewrite', formatteropts)
1322 @command(b'perffncachewrite', formatteropts)
1315 def perffncachewrite(ui, repo, **opts):
1323 def perffncachewrite(ui, repo, **opts):
1316 opts = _byteskwargs(opts)
1324 opts = _byteskwargs(opts)
1317 timer, fm = gettimer(ui, opts)
1325 timer, fm = gettimer(ui, opts)
1318 s = repo.store
1326 s = repo.store
1319 lock = repo.lock()
1327 lock = repo.lock()
1320 s.fncache._load()
1328 s.fncache._load()
1321 tr = repo.transaction(b'perffncachewrite')
1329 tr = repo.transaction(b'perffncachewrite')
1322 tr.addbackup(b'fncache')
1330 tr.addbackup(b'fncache')
1323 def d():
1331 def d():
1324 s.fncache._dirty = True
1332 s.fncache._dirty = True
1325 s.fncache.write(tr)
1333 s.fncache.write(tr)
1326 timer(d)
1334 timer(d)
1327 tr.close()
1335 tr.close()
1328 lock.release()
1336 lock.release()
1329 fm.end()
1337 fm.end()
1330
1338
1331 @command(b'perffncacheencode', formatteropts)
1339 @command(b'perffncacheencode', formatteropts)
1332 def perffncacheencode(ui, repo, **opts):
1340 def perffncacheencode(ui, repo, **opts):
1333 opts = _byteskwargs(opts)
1341 opts = _byteskwargs(opts)
1334 timer, fm = gettimer(ui, opts)
1342 timer, fm = gettimer(ui, opts)
1335 s = repo.store
1343 s = repo.store
1336 s.fncache._load()
1344 s.fncache._load()
1337 def d():
1345 def d():
1338 for p in s.fncache.entries:
1346 for p in s.fncache.entries:
1339 s.encode(p)
1347 s.encode(p)
1340 timer(d)
1348 timer(d)
1341 fm.end()
1349 fm.end()
1342
1350
1343 def _bdiffworker(q, blocks, xdiff, ready, done):
1351 def _bdiffworker(q, blocks, xdiff, ready, done):
1344 while not done.is_set():
1352 while not done.is_set():
1345 pair = q.get()
1353 pair = q.get()
1346 while pair is not None:
1354 while pair is not None:
1347 if xdiff:
1355 if xdiff:
1348 mdiff.bdiff.xdiffblocks(*pair)
1356 mdiff.bdiff.xdiffblocks(*pair)
1349 elif blocks:
1357 elif blocks:
1350 mdiff.bdiff.blocks(*pair)
1358 mdiff.bdiff.blocks(*pair)
1351 else:
1359 else:
1352 mdiff.textdiff(*pair)
1360 mdiff.textdiff(*pair)
1353 q.task_done()
1361 q.task_done()
1354 pair = q.get()
1362 pair = q.get()
1355 q.task_done() # for the None one
1363 q.task_done() # for the None one
1356 with ready:
1364 with ready:
1357 ready.wait()
1365 ready.wait()
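
# How the worker protocol above fits together (a reading aid, not a behaviour
# change): perfbdiff feeds text pairs into `q`, with one `None` sentinel per
# worker marking the end of a batch. After draining a batch, a worker calls
# task_done() for the sentinel and then blocks on the `ready` condition until
# the main thread notifies it for the next timed run; the `done` event is set
# only at the very end so the threads can exit.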
1358
1366
1359 def _manifestrevision(repo, mnode):
1367 def _manifestrevision(repo, mnode):
1360 ml = repo.manifestlog
1368 ml = repo.manifestlog
1361
1369
1362 if util.safehasattr(ml, b'getstorage'):
1370 if util.safehasattr(ml, b'getstorage'):
1363 store = ml.getstorage(b'')
1371 store = ml.getstorage(b'')
1364 else:
1372 else:
1365 store = ml._revlog
1373 store = ml._revlog
1366
1374
1367 return store.revision(mnode)
1375 return store.revision(mnode)
1368
1376
1369 @command(b'perfbdiff', revlogopts + formatteropts + [
1377 @command(b'perfbdiff', revlogopts + formatteropts + [
1370 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1378 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1371 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1379 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1372 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1380 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1373 (b'', b'blocks', False, b'test computing diffs into blocks'),
1381 (b'', b'blocks', False, b'test computing diffs into blocks'),
1374 (b'', b'xdiff', False, b'use xdiff algorithm'),
1382 (b'', b'xdiff', False, b'use xdiff algorithm'),
1375 ],
1383 ],
1376
1384
1377 b'-c|-m|FILE REV')
1385 b'-c|-m|FILE REV')
1378 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1386 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1379 """benchmark a bdiff between revisions
1387 """benchmark a bdiff between revisions
1380
1388
1381 By default, benchmark a bdiff between its delta parent and itself.
1389 By default, benchmark a bdiff between its delta parent and itself.
1382
1390
1383 With ``--count``, benchmark bdiffs between delta parents and self for N
1391 With ``--count``, benchmark bdiffs between delta parents and self for N
1384 revisions starting at the specified revision.
1392 revisions starting at the specified revision.
1385
1393
1386 With ``--alldata``, assume the requested revision is a changeset and
1394 With ``--alldata``, assume the requested revision is a changeset and
1387 measure bdiffs for all changes related to that changeset (manifest
1395 measure bdiffs for all changes related to that changeset (manifest
1388 and filelogs).
1396 and filelogs).
1389 """
1397 """
1390 opts = _byteskwargs(opts)
1398 opts = _byteskwargs(opts)
1391
1399
1392 if opts[b'xdiff'] and not opts[b'blocks']:
1400 if opts[b'xdiff'] and not opts[b'blocks']:
1393 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1401 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1394
1402
1395 if opts[b'alldata']:
1403 if opts[b'alldata']:
1396 opts[b'changelog'] = True
1404 opts[b'changelog'] = True
1397
1405
1398 if opts.get(b'changelog') or opts.get(b'manifest'):
1406 if opts.get(b'changelog') or opts.get(b'manifest'):
1399 file_, rev = None, file_
1407 file_, rev = None, file_
1400 elif rev is None:
1408 elif rev is None:
1401 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1409 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1402
1410
1403 blocks = opts[b'blocks']
1411 blocks = opts[b'blocks']
1404 xdiff = opts[b'xdiff']
1412 xdiff = opts[b'xdiff']
1405 textpairs = []
1413 textpairs = []
1406
1414
1407 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1415 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1408
1416
1409 startrev = r.rev(r.lookup(rev))
1417 startrev = r.rev(r.lookup(rev))
1410 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1418 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1411 if opts[b'alldata']:
1419 if opts[b'alldata']:
1412 # Load revisions associated with changeset.
1420 # Load revisions associated with changeset.
1413 ctx = repo[rev]
1421 ctx = repo[rev]
1414 mtext = _manifestrevision(repo, ctx.manifestnode())
1422 mtext = _manifestrevision(repo, ctx.manifestnode())
1415 for pctx in ctx.parents():
1423 for pctx in ctx.parents():
1416 pman = _manifestrevision(repo, pctx.manifestnode())
1424 pman = _manifestrevision(repo, pctx.manifestnode())
1417 textpairs.append((pman, mtext))
1425 textpairs.append((pman, mtext))
1418
1426
1419 # Load filelog revisions by iterating manifest delta.
1427 # Load filelog revisions by iterating manifest delta.
1420 man = ctx.manifest()
1428 man = ctx.manifest()
1421 pman = ctx.p1().manifest()
1429 pman = ctx.p1().manifest()
1422 for filename, change in pman.diff(man).items():
1430 for filename, change in pman.diff(man).items():
1423 fctx = repo.file(filename)
1431 fctx = repo.file(filename)
1424 f1 = fctx.revision(change[0][0] or -1)
1432 f1 = fctx.revision(change[0][0] or -1)
1425 f2 = fctx.revision(change[1][0] or -1)
1433 f2 = fctx.revision(change[1][0] or -1)
1426 textpairs.append((f1, f2))
1434 textpairs.append((f1, f2))
1427 else:
1435 else:
1428 dp = r.deltaparent(rev)
1436 dp = r.deltaparent(rev)
1429 textpairs.append((r.revision(dp), r.revision(rev)))
1437 textpairs.append((r.revision(dp), r.revision(rev)))
1430
1438
1431 withthreads = threads > 0
1439 withthreads = threads > 0
1432 if not withthreads:
1440 if not withthreads:
1433 def d():
1441 def d():
1434 for pair in textpairs:
1442 for pair in textpairs:
1435 if xdiff:
1443 if xdiff:
1436 mdiff.bdiff.xdiffblocks(*pair)
1444 mdiff.bdiff.xdiffblocks(*pair)
1437 elif blocks:
1445 elif blocks:
1438 mdiff.bdiff.blocks(*pair)
1446 mdiff.bdiff.blocks(*pair)
1439 else:
1447 else:
1440 mdiff.textdiff(*pair)
1448 mdiff.textdiff(*pair)
1441 else:
1449 else:
1442 q = queue()
1450 q = queue()
1443 for i in _xrange(threads):
1451 for i in _xrange(threads):
1444 q.put(None)
1452 q.put(None)
1445 ready = threading.Condition()
1453 ready = threading.Condition()
1446 done = threading.Event()
1454 done = threading.Event()
1447 for i in _xrange(threads):
1455 for i in _xrange(threads):
1448 threading.Thread(target=_bdiffworker,
1456 threading.Thread(target=_bdiffworker,
1449 args=(q, blocks, xdiff, ready, done)).start()
1457 args=(q, blocks, xdiff, ready, done)).start()
1450 q.join()
1458 q.join()
1451 def d():
1459 def d():
1452 for pair in textpairs:
1460 for pair in textpairs:
1453 q.put(pair)
1461 q.put(pair)
1454 for i in _xrange(threads):
1462 for i in _xrange(threads):
1455 q.put(None)
1463 q.put(None)
1456 with ready:
1464 with ready:
1457 ready.notify_all()
1465 ready.notify_all()
1458 q.join()
1466 q.join()
1459 timer, fm = gettimer(ui, opts)
1467 timer, fm = gettimer(ui, opts)
1460 timer(d)
1468 timer(d)
1461 fm.end()
1469 fm.end()
1462
1470
1463 if withthreads:
1471 if withthreads:
1464 done.set()
1472 done.set()
1465 for i in _xrange(threads):
1473 for i in _xrange(threads):
1466 q.put(None)
1474 q.put(None)
1467 with ready:
1475 with ready:
1468 ready.notify_all()
1476 ready.notify_all()
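
# Example usage of perfbdiff (a sketch; revision numbers are placeholders):
#
#   $ hg perfbdiff -c 100                    # one changelog rev vs. its delta parent
#   $ hg perfbdiff -c 100 --count 50         # 50 revisions starting at rev 100
#   $ hg perfbdiff -c 100 --blocks --xdiff   # --xdiff requires --blocks
#   $ hg perfbdiff -c 100 --threads 4        # push the diffs through 4 workers
#
# These combinations follow the option definitions and validation above.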
1469
1477
1470 @command(b'perfunidiff', revlogopts + formatteropts + [
1478 @command(b'perfunidiff', revlogopts + formatteropts + [
1471 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1479 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1472 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1480 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1473 ], b'-c|-m|FILE REV')
1481 ], b'-c|-m|FILE REV')
1474 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1482 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1475 """benchmark a unified diff between revisions
1483 """benchmark a unified diff between revisions
1476
1484
1477 This doesn't include any copy tracing - it's just a unified diff
1485 This doesn't include any copy tracing - it's just a unified diff
1478 of the texts.
1486 of the texts.
1479
1487
1480 By default, benchmark a diff between its delta parent and itself.
1488 By default, benchmark a diff between its delta parent and itself.
1481
1489
1482 With ``--count``, benchmark diffs between delta parents and self for N
1490 With ``--count``, benchmark diffs between delta parents and self for N
1483 revisions starting at the specified revision.
1491 revisions starting at the specified revision.
1484
1492
1485 With ``--alldata``, assume the requested revision is a changeset and
1493 With ``--alldata``, assume the requested revision is a changeset and
1486 measure diffs for all changes related to that changeset (manifest
1494 measure diffs for all changes related to that changeset (manifest
1487 and filelogs).
1495 and filelogs).
1488 """
1496 """
1489 opts = _byteskwargs(opts)
1497 opts = _byteskwargs(opts)
1490 if opts[b'alldata']:
1498 if opts[b'alldata']:
1491 opts[b'changelog'] = True
1499 opts[b'changelog'] = True
1492
1500
1493 if opts.get(b'changelog') or opts.get(b'manifest'):
1501 if opts.get(b'changelog') or opts.get(b'manifest'):
1494 file_, rev = None, file_
1502 file_, rev = None, file_
1495 elif rev is None:
1503 elif rev is None:
1496 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1504 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1497
1505
1498 textpairs = []
1506 textpairs = []
1499
1507
1500 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1508 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1501
1509
1502 startrev = r.rev(r.lookup(rev))
1510 startrev = r.rev(r.lookup(rev))
1503 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1511 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1504 if opts[b'alldata']:
1512 if opts[b'alldata']:
1505 # Load revisions associated with changeset.
1513 # Load revisions associated with changeset.
1506 ctx = repo[rev]
1514 ctx = repo[rev]
1507 mtext = _manifestrevision(repo, ctx.manifestnode())
1515 mtext = _manifestrevision(repo, ctx.manifestnode())
1508 for pctx in ctx.parents():
1516 for pctx in ctx.parents():
1509 pman = _manifestrevision(repo, pctx.manifestnode())
1517 pman = _manifestrevision(repo, pctx.manifestnode())
1510 textpairs.append((pman, mtext))
1518 textpairs.append((pman, mtext))
1511
1519
1512 # Load filelog revisions by iterating manifest delta.
1520 # Load filelog revisions by iterating manifest delta.
1513 man = ctx.manifest()
1521 man = ctx.manifest()
1514 pman = ctx.p1().manifest()
1522 pman = ctx.p1().manifest()
1515 for filename, change in pman.diff(man).items():
1523 for filename, change in pman.diff(man).items():
1516 fctx = repo.file(filename)
1524 fctx = repo.file(filename)
1517 f1 = fctx.revision(change[0][0] or -1)
1525 f1 = fctx.revision(change[0][0] or -1)
1518 f2 = fctx.revision(change[1][0] or -1)
1526 f2 = fctx.revision(change[1][0] or -1)
1519 textpairs.append((f1, f2))
1527 textpairs.append((f1, f2))
1520 else:
1528 else:
1521 dp = r.deltaparent(rev)
1529 dp = r.deltaparent(rev)
1522 textpairs.append((r.revision(dp), r.revision(rev)))
1530 textpairs.append((r.revision(dp), r.revision(rev)))
1523
1531
1524 def d():
1532 def d():
1525 for left, right in textpairs:
1533 for left, right in textpairs:
1526 # The date strings don't matter, so we pass empty strings.
1534 # The date strings don't matter, so we pass empty strings.
1527 headerlines, hunks = mdiff.unidiff(
1535 headerlines, hunks = mdiff.unidiff(
1528 left, b'', right, b'', b'left', b'right', binary=False)
1536 left, b'', right, b'', b'left', b'right', binary=False)
1529 # consume iterators in roughly the way patch.py does
1537 # consume iterators in roughly the way patch.py does
1530 b'\n'.join(headerlines)
1538 b'\n'.join(headerlines)
1531 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1539 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1532 timer, fm = gettimer(ui, opts)
1540 timer, fm = gettimer(ui, opts)
1533 timer(d)
1541 timer(d)
1534 fm.end()
1542 fm.end()
1535
1543
1536 @command(b'perfdiffwd', formatteropts)
1544 @command(b'perfdiffwd', formatteropts)
1537 def perfdiffwd(ui, repo, **opts):
1545 def perfdiffwd(ui, repo, **opts):
1538 """Profile diff of working directory changes"""
1546 """Profile diff of working directory changes"""
1539 opts = _byteskwargs(opts)
1547 opts = _byteskwargs(opts)
1540 timer, fm = gettimer(ui, opts)
1548 timer, fm = gettimer(ui, opts)
1541 options = {
1549 options = {
1542 'w': 'ignore_all_space',
1550 'w': 'ignore_all_space',
1543 'b': 'ignore_space_change',
1551 'b': 'ignore_space_change',
1544 'B': 'ignore_blank_lines',
1552 'B': 'ignore_blank_lines',
1545 }
1553 }
1546
1554
1547 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1555 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1548 opts = dict((options[c], b'1') for c in diffopt)
1556 opts = dict((options[c], b'1') for c in diffopt)
1549 def d():
1557 def d():
1550 ui.pushbuffer()
1558 ui.pushbuffer()
1551 commands.diff(ui, repo, **opts)
1559 commands.diff(ui, repo, **opts)
1552 ui.popbuffer()
1560 ui.popbuffer()
1553 diffopt = diffopt.encode('ascii')
1561 diffopt = diffopt.encode('ascii')
1554 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1562 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1555 timer(d, title=title)
1563 timer(d, title=title)
1556 fm.end()
1564 fm.end()
1557
1565
1558 @command(b'perfrevlogindex', revlogopts + formatteropts,
1566 @command(b'perfrevlogindex', revlogopts + formatteropts,
1559 b'-c|-m|FILE')
1567 b'-c|-m|FILE')
1560 def perfrevlogindex(ui, repo, file_=None, **opts):
1568 def perfrevlogindex(ui, repo, file_=None, **opts):
1561 """Benchmark operations against a revlog index.
1569 """Benchmark operations against a revlog index.
1562
1570
1563 This tests constructing a revlog instance, reading index data,
1571 This tests constructing a revlog instance, reading index data,
1564 parsing index data, and performing various operations related to
1572 parsing index data, and performing various operations related to
1565 index data.
1573 index data.
1566 """
1574 """
1567
1575
1568 opts = _byteskwargs(opts)
1576 opts = _byteskwargs(opts)
1569
1577
1570 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1578 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1571
1579
1572 opener = getattr(rl, 'opener') # trick linter
1580 opener = getattr(rl, 'opener') # trick linter
1573 indexfile = rl.indexfile
1581 indexfile = rl.indexfile
1574 data = opener.read(indexfile)
1582 data = opener.read(indexfile)
1575
1583
1576 header = struct.unpack(b'>I', data[0:4])[0]
1584 header = struct.unpack(b'>I', data[0:4])[0]
1577 version = header & 0xFFFF
1585 version = header & 0xFFFF
1578 if version == 1:
1586 if version == 1:
1579 revlogio = revlog.revlogio()
1587 revlogio = revlog.revlogio()
1580 inline = header & (1 << 16)
1588 inline = header & (1 << 16)
1581 else:
1589 else:
1582 raise error.Abort((b'unsupported revlog version: %d') % version)
1590 raise error.Abort((b'unsupported revlog version: %d') % version)
1583
1591
1584 rllen = len(rl)
1592 rllen = len(rl)
1585
1593
1586 node0 = rl.node(0)
1594 node0 = rl.node(0)
1587 node25 = rl.node(rllen // 4)
1595 node25 = rl.node(rllen // 4)
1588 node50 = rl.node(rllen // 2)
1596 node50 = rl.node(rllen // 2)
1589 node75 = rl.node(rllen // 4 * 3)
1597 node75 = rl.node(rllen // 4 * 3)
1590 node100 = rl.node(rllen - 1)
1598 node100 = rl.node(rllen - 1)
1591
1599
1592 allrevs = range(rllen)
1600 allrevs = range(rllen)
1593 allrevsrev = list(reversed(allrevs))
1601 allrevsrev = list(reversed(allrevs))
1594 allnodes = [rl.node(rev) for rev in range(rllen)]
1602 allnodes = [rl.node(rev) for rev in range(rllen)]
1595 allnodesrev = list(reversed(allnodes))
1603 allnodesrev = list(reversed(allnodes))
1596
1604
1597 def constructor():
1605 def constructor():
1598 revlog.revlog(opener, indexfile)
1606 revlog.revlog(opener, indexfile)
1599
1607
1600 def read():
1608 def read():
1601 with opener(indexfile) as fh:
1609 with opener(indexfile) as fh:
1602 fh.read()
1610 fh.read()
1603
1611
1604 def parseindex():
1612 def parseindex():
1605 revlogio.parseindex(data, inline)
1613 revlogio.parseindex(data, inline)
1606
1614
1607 def getentry(revornode):
1615 def getentry(revornode):
1608 index = revlogio.parseindex(data, inline)[0]
1616 index = revlogio.parseindex(data, inline)[0]
1609 index[revornode]
1617 index[revornode]
1610
1618
1611 def getentries(revs, count=1):
1619 def getentries(revs, count=1):
1612 index = revlogio.parseindex(data, inline)[0]
1620 index = revlogio.parseindex(data, inline)[0]
1613
1621
1614 for i in range(count):
1622 for i in range(count):
1615 for rev in revs:
1623 for rev in revs:
1616 index[rev]
1624 index[rev]
1617
1625
1618 def resolvenode(node):
1626 def resolvenode(node):
1619 nodemap = revlogio.parseindex(data, inline)[1]
1627 nodemap = revlogio.parseindex(data, inline)[1]
1620 # This only works for the C code.
1628 # This only works for the C code.
1621 if nodemap is None:
1629 if nodemap is None:
1622 return
1630 return
1623
1631
1624 try:
1632 try:
1625 nodemap[node]
1633 nodemap[node]
1626 except error.RevlogError:
1634 except error.RevlogError:
1627 pass
1635 pass
1628
1636
1629 def resolvenodes(nodes, count=1):
1637 def resolvenodes(nodes, count=1):
1630 nodemap = revlogio.parseindex(data, inline)[1]
1638 nodemap = revlogio.parseindex(data, inline)[1]
1631 if nodemap is None:
1639 if nodemap is None:
1632 return
1640 return
1633
1641
1634 for i in range(count):
1642 for i in range(count):
1635 for node in nodes:
1643 for node in nodes:
1636 try:
1644 try:
1637 nodemap[node]
1645 nodemap[node]
1638 except error.RevlogError:
1646 except error.RevlogError:
1639 pass
1647 pass
1640
1648
1641 benches = [
1649 benches = [
1642 (constructor, b'revlog constructor'),
1650 (constructor, b'revlog constructor'),
1643 (read, b'read'),
1651 (read, b'read'),
1644 (parseindex, b'create index object'),
1652 (parseindex, b'create index object'),
1645 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1653 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1646 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1654 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1647 (lambda: resolvenode(node0), b'look up node at rev 0'),
1655 (lambda: resolvenode(node0), b'look up node at rev 0'),
1648 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1656 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1649 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1657 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1650 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1658 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1651 (lambda: resolvenode(node100), b'look up node at tip'),
1659 (lambda: resolvenode(node100), b'look up node at tip'),
1652 # 2x variation is to measure caching impact.
1660 # 2x variation is to measure caching impact.
1653 (lambda: resolvenodes(allnodes),
1661 (lambda: resolvenodes(allnodes),
1654 b'look up all nodes (forward)'),
1662 b'look up all nodes (forward)'),
1655 (lambda: resolvenodes(allnodes, 2),
1663 (lambda: resolvenodes(allnodes, 2),
1656 b'look up all nodes 2x (forward)'),
1664 b'look up all nodes 2x (forward)'),
1657 (lambda: resolvenodes(allnodesrev),
1665 (lambda: resolvenodes(allnodesrev),
1658 b'look up all nodes (reverse)'),
1666 b'look up all nodes (reverse)'),
1659 (lambda: resolvenodes(allnodesrev, 2),
1667 (lambda: resolvenodes(allnodesrev, 2),
1660 b'look up all nodes 2x (reverse)'),
1668 b'look up all nodes 2x (reverse)'),
1661 (lambda: getentries(allrevs),
1669 (lambda: getentries(allrevs),
1662 b'retrieve all index entries (forward)'),
1670 b'retrieve all index entries (forward)'),
1663 (lambda: getentries(allrevs, 2),
1671 (lambda: getentries(allrevs, 2),
1664 b'retrieve all index entries 2x (forward)'),
1672 b'retrieve all index entries 2x (forward)'),
1665 (lambda: getentries(allrevsrev),
1673 (lambda: getentries(allrevsrev),
1666 b'retrieve all index entries (reverse)'),
1674 b'retrieve all index entries (reverse)'),
1667 (lambda: getentries(allrevsrev, 2),
1675 (lambda: getentries(allrevsrev, 2),
1668 b'retrieve all index entries 2x (reverse)'),
1676 b'retrieve all index entries 2x (reverse)'),
1669 ]
1677 ]
1670
1678
1671 for fn, title in benches:
1679 for fn, title in benches:
1672 timer, fm = gettimer(ui, opts)
1680 timer, fm = gettimer(ui, opts)
1673 timer(fn, title=title)
1681 timer(fn, title=title)
1674 fm.end()
1682 fm.end()
1675
1683
1676 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1684 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1677 [(b'd', b'dist', 100, b'distance between the revisions'),
1685 [(b'd', b'dist', 100, b'distance between the revisions'),
1678 (b's', b'startrev', 0, b'revision to start reading at'),
1686 (b's', b'startrev', 0, b'revision to start reading at'),
1679 (b'', b'reverse', False, b'read in reverse')],
1687 (b'', b'reverse', False, b'read in reverse')],
1680 b'-c|-m|FILE')
1688 b'-c|-m|FILE')
1681 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1689 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1682 **opts):
1690 **opts):
1683 """Benchmark reading a series of revisions from a revlog.
1691 """Benchmark reading a series of revisions from a revlog.
1684
1692
1685 By default, we read every ``-d/--dist`` revision from 0 to tip of
1693 By default, we read every ``-d/--dist`` revision from 0 to tip of
1686 the specified revlog.
1694 the specified revlog.
1687
1695
1688 The start revision can be defined via ``-s/--startrev``.
1696 The start revision can be defined via ``-s/--startrev``.
1689 """
1697 """
1690 opts = _byteskwargs(opts)
1698 opts = _byteskwargs(opts)
1691
1699
1692 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1700 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1693 rllen = getlen(ui)(rl)
1701 rllen = getlen(ui)(rl)
1694
1702
1695 if startrev < 0:
1703 if startrev < 0:
1696 startrev = rllen + startrev
1704 startrev = rllen + startrev
1697
1705
1698 def d():
1706 def d():
1699 rl.clearcaches()
1707 rl.clearcaches()
1700
1708
1701 beginrev = startrev
1709 beginrev = startrev
1702 endrev = rllen
1710 endrev = rllen
1703 dist = opts[b'dist']
1711 dist = opts[b'dist']
1704
1712
1705 if reverse:
1713 if reverse:
1706 beginrev, endrev = endrev - 1, beginrev - 1
1714 beginrev, endrev = endrev - 1, beginrev - 1
1707 dist = -1 * dist
1715 dist = -1 * dist
1708
1716
1709 for x in _xrange(beginrev, endrev, dist):
1717 for x in _xrange(beginrev, endrev, dist):
1710 # Old revisions don't support passing int.
1718 # Old revisions don't support passing int.
1711 n = rl.node(x)
1719 n = rl.node(x)
1712 rl.revision(n)
1720 rl.revision(n)
1713
1721
1714 timer, fm = gettimer(ui, opts)
1722 timer, fm = gettimer(ui, opts)
1715 timer(d)
1723 timer(d)
1716 fm.end()
1724 fm.end()
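
# Example usage of perfrevlogrevisions (a sketch):
#
#   $ hg perfrevlogrevisions -c                        # every 100th changelog rev
#   $ hg perfrevlogrevisions -c --dist 1               # every changelog rev
#   $ hg perfrevlogrevisions -m --reverse --startrev=-1000
#
# A negative --startrev counts from the end of the revlog, and --reverse walks
# from the tip downwards, as handled above.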
1717
1725
1718 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1726 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1719 [(b's', b'startrev', 1000, b'revision to start writing at'),
1727 [(b's', b'startrev', 1000, b'revision to start writing at'),
1720 (b'', b'stoprev', -1, b'last revision to write'),
1728 (b'', b'stoprev', -1, b'last revision to write'),
1721 (b'', b'count', 3, b'number of runs to perform'),
1729 (b'', b'count', 3, b'number of runs to perform'),
1722 (b'', b'details', False, b'print timing for every revision tested'),
1730 (b'', b'details', False, b'print timing for every revision tested'),
1723 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1731 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1724 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1732 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1725 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1733 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1726 ],
1734 ],
1727 b'-c|-m|FILE')
1735 b'-c|-m|FILE')
1728 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1736 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1729 """Benchmark writing a series of revisions to a revlog.
1737 """Benchmark writing a series of revisions to a revlog.
1730
1738
1731 Possible source values are:
1739 Possible source values are:
1732 * `full`: add from a full text (default).
1740 * `full`: add from a full text (default).
1733 * `parent-1`: add from a delta to the first parent
1741 * `parent-1`: add from a delta to the first parent
1734 * `parent-2`: add from a delta to the second parent if it exists
1742 * `parent-2`: add from a delta to the second parent if it exists
1735 (use a delta from the first parent otherwise)
1743 (use a delta from the first parent otherwise)
1736 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1744 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1737 * `storage`: add from the existing precomputed deltas
1745 * `storage`: add from the existing precomputed deltas
1738 """
1746 """
1739 opts = _byteskwargs(opts)
1747 opts = _byteskwargs(opts)
1740
1748
1741 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1749 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1742 rllen = getlen(ui)(rl)
1750 rllen = getlen(ui)(rl)
1743 if startrev < 0:
1751 if startrev < 0:
1744 startrev = rllen + startrev
1752 startrev = rllen + startrev
1745 if stoprev < 0:
1753 if stoprev < 0:
1746 stoprev = rllen + stoprev
1754 stoprev = rllen + stoprev
1747
1755
1748 lazydeltabase = opts['lazydeltabase']
1756 lazydeltabase = opts['lazydeltabase']
1749 source = opts['source']
1757 source = opts['source']
1750 clearcaches = opts['clear_caches']
1758 clearcaches = opts['clear_caches']
1751 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1759 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1752 b'storage')
1760 b'storage')
1753 if source not in validsource:
1761 if source not in validsource:
1754 raise error.Abort('invalid source type: %s' % source)
1762 raise error.Abort('invalid source type: %s' % source)
1755
1763
1756 ### actually gather results
1764 ### actually gather results
1757 count = opts['count']
1765 count = opts['count']
1758 if count <= 0:
1766 if count <= 0:
1759 raise error.Abort('invalid run count: %d' % count)
1767 raise error.Abort('invalid run count: %d' % count)
1760 allresults = []
1768 allresults = []
1761 for c in range(count):
1769 for c in range(count):
1762 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1770 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1763 lazydeltabase=lazydeltabase,
1771 lazydeltabase=lazydeltabase,
1764 clearcaches=clearcaches)
1772 clearcaches=clearcaches)
1765 allresults.append(timing)
1773 allresults.append(timing)
1766
1774
1767 ### consolidate the results in a single list
1775 ### consolidate the results in a single list
1768 results = []
1776 results = []
1769 for idx, (rev, t) in enumerate(allresults[0]):
1777 for idx, (rev, t) in enumerate(allresults[0]):
1770 ts = [t]
1778 ts = [t]
1771 for other in allresults[1:]:
1779 for other in allresults[1:]:
1772 orev, ot = other[idx]
1780 orev, ot = other[idx]
1773 assert orev == rev
1781 assert orev == rev
1774 ts.append(ot)
1782 ts.append(ot)
1775 results.append((rev, ts))
1783 results.append((rev, ts))
1776 resultcount = len(results)
1784 resultcount = len(results)
1777
1785
1778 ### Compute and display relevant statistics
1786 ### Compute and display relevant statistics
1779
1787
1780 # get a formatter
1788 # get a formatter
1781 fm = ui.formatter(b'perf', opts)
1789 fm = ui.formatter(b'perf', opts)
1782 displayall = ui.configbool(b"perf", b"all-timing", False)
1790 displayall = ui.configbool(b"perf", b"all-timing", False)
1783
1791
1784 # print individual details if requested
1792 # print individual details if requested
1785 if opts['details']:
1793 if opts['details']:
1786 for idx, item in enumerate(results, 1):
1794 for idx, item in enumerate(results, 1):
1787 rev, data = item
1795 rev, data = item
1788 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1796 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1789 formatone(fm, data, title=title, displayall=displayall)
1797 formatone(fm, data, title=title, displayall=displayall)
1790
1798
1791 # sorts results by median time
1799 # sorts results by median time
1792 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1800 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1793 # list of (name, index) to display
1801 # list of (name, index) to display
1794 relevants = [
1802 relevants = [
1795 ("min", 0),
1803 ("min", 0),
1796 ("10%", resultcount * 10 // 100),
1804 ("10%", resultcount * 10 // 100),
1797 ("25%", resultcount * 25 // 100),
1805 ("25%", resultcount * 25 // 100),
1798 ("50%", resultcount * 70 // 100),
1806 ("50%", resultcount * 70 // 100),
1799 ("75%", resultcount * 75 // 100),
1807 ("75%", resultcount * 75 // 100),
1800 ("90%", resultcount * 90 // 100),
1808 ("90%", resultcount * 90 // 100),
1801 ("95%", resultcount * 95 // 100),
1809 ("95%", resultcount * 95 // 100),
1802 ("99%", resultcount * 99 // 100),
1810 ("99%", resultcount * 99 // 100),
1803 ("99.9%", resultcount * 999 // 1000),
1811 ("99.9%", resultcount * 999 // 1000),
1804 ("99.99%", resultcount * 9999 // 10000),
1812 ("99.99%", resultcount * 9999 // 10000),
1805 ("99.999%", resultcount * 99999 // 100000),
1813 ("99.999%", resultcount * 99999 // 100000),
1806 ("max", -1),
1814 ("max", -1),
1807 ]
1815 ]
1808 if not ui.quiet:
1816 if not ui.quiet:
1809 for name, idx in relevants:
1817 for name, idx in relevants:
1810 data = results[idx]
1818 data = results[idx]
1811 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1819 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1812 formatone(fm, data[1], title=title, displayall=displayall)
1820 formatone(fm, data[1], title=title, displayall=displayall)
1813
1821
1814 # XXX summing that many floats will not be very precise; we ignore this fact
1822 # XXX summing that many floats will not be very precise; we ignore this fact
1815 # for now
1823 # for now
1816 totaltime = []
1824 totaltime = []
1817 for item in allresults:
1825 for item in allresults:
1818 totaltime.append((sum(x[1][0] for x in item),
1826 totaltime.append((sum(x[1][0] for x in item),
1819 sum(x[1][1] for x in item),
1827 sum(x[1][1] for x in item),
1820 sum(x[1][2] for x in item),)
1828 sum(x[1][2] for x in item),)
1821 )
1829 )
1822 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1830 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1823 displayall=displayall)
1831 displayall=displayall)
1824 fm.end()
1832 fm.end()
1825
1833
1826 class _faketr(object):
1834 class _faketr(object):
1827 def add(s, x, y, z=None):
1835 def add(s, x, y, z=None):
1828 return None
1836 return None
1829
1837
1830 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1838 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1831 lazydeltabase=True, clearcaches=True):
1839 lazydeltabase=True, clearcaches=True):
1832 timings = []
1840 timings = []
1833 tr = _faketr()
1841 tr = _faketr()
1834 with _temprevlog(ui, orig, startrev) as dest:
1842 with _temprevlog(ui, orig, startrev) as dest:
1835 dest._lazydeltabase = lazydeltabase
1843 dest._lazydeltabase = lazydeltabase
1836 revs = list(orig.revs(startrev, stoprev))
1844 revs = list(orig.revs(startrev, stoprev))
1837 total = len(revs)
1845 total = len(revs)
1838 topic = 'adding'
1846 topic = 'adding'
1839 if runidx is not None:
1847 if runidx is not None:
1840 topic += ' (run #%d)' % runidx
1848 topic += ' (run #%d)' % runidx
1841 # Support both old and new progress API
1849 # Support both old and new progress API
1842 if util.safehasattr(ui, 'makeprogress'):
1850 if util.safehasattr(ui, 'makeprogress'):
1843 progress = ui.makeprogress(topic, unit='revs', total=total)
1851 progress = ui.makeprogress(topic, unit='revs', total=total)
1844 def updateprogress(pos):
1852 def updateprogress(pos):
1845 progress.update(pos)
1853 progress.update(pos)
1846 def completeprogress():
1854 def completeprogress():
1847 progress.complete()
1855 progress.complete()
1848 else:
1856 else:
1849 def updateprogress(pos):
1857 def updateprogress(pos):
1850 ui.progress(topic, pos, unit='revs', total=total)
1858 ui.progress(topic, pos, unit='revs', total=total)
1851 def completeprogress():
1859 def completeprogress():
1852 ui.progress(topic, None, unit='revs', total=total)
1860 ui.progress(topic, None, unit='revs', total=total)
1853
1861
1854 for idx, rev in enumerate(revs):
1862 for idx, rev in enumerate(revs):
1855 updateprogress(idx)
1863 updateprogress(idx)
1856 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1864 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1857 if clearcaches:
1865 if clearcaches:
1858 dest.index.clearcaches()
1866 dest.index.clearcaches()
1859 dest.clearcaches()
1867 dest.clearcaches()
1860 with timeone() as r:
1868 with timeone() as r:
1861 dest.addrawrevision(*addargs, **addkwargs)
1869 dest.addrawrevision(*addargs, **addkwargs)
1862 timings.append((rev, r[0]))
1870 timings.append((rev, r[0]))
1863 updateprogress(total)
1871 updateprogress(total)
1864 completeprogress()
1872 completeprogress()
1865 return timings
1873 return timings
1866
1874
1867 def _getrevisionseed(orig, rev, tr, source):
1875 def _getrevisionseed(orig, rev, tr, source):
1868 from mercurial.node import nullid
1876 from mercurial.node import nullid
1869
1877
1870 linkrev = orig.linkrev(rev)
1878 linkrev = orig.linkrev(rev)
1871 node = orig.node(rev)
1879 node = orig.node(rev)
1872 p1, p2 = orig.parents(node)
1880 p1, p2 = orig.parents(node)
1873 flags = orig.flags(rev)
1881 flags = orig.flags(rev)
1874 cachedelta = None
1882 cachedelta = None
1875 text = None
1883 text = None
1876
1884
1877 if source == b'full':
1885 if source == b'full':
1878 text = orig.revision(rev)
1886 text = orig.revision(rev)
1879 elif source == b'parent-1':
1887 elif source == b'parent-1':
1880 baserev = orig.rev(p1)
1888 baserev = orig.rev(p1)
1881 cachedelta = (baserev, orig.revdiff(p1, rev))
1889 cachedelta = (baserev, orig.revdiff(p1, rev))
1882 elif source == b'parent-2':
1890 elif source == b'parent-2':
1883 parent = p2
1891 parent = p2
1884 if p2 == nullid:
1892 if p2 == nullid:
1885 parent = p1
1893 parent = p1
1886 baserev = orig.rev(parent)
1894 baserev = orig.rev(parent)
1887 cachedelta = (baserev, orig.revdiff(parent, rev))
1895 cachedelta = (baserev, orig.revdiff(parent, rev))
1888 elif source == b'parent-smallest':
1896 elif source == b'parent-smallest':
1889 p1diff = orig.revdiff(p1, rev)
1897 p1diff = orig.revdiff(p1, rev)
1890 parent = p1
1898 parent = p1
1891 diff = p1diff
1899 diff = p1diff
1892 if p2 != nullid:
1900 if p2 != nullid:
1893 p2diff = orig.revdiff(p2, rev)
1901 p2diff = orig.revdiff(p2, rev)
1894 if len(p1diff) > len(p2diff):
1902 if len(p1diff) > len(p2diff):
1895 parent = p2
1903 parent = p2
1896 diff = p2diff
1904 diff = p2diff
1897 baserev = orig.rev(parent)
1905 baserev = orig.rev(parent)
1898 cachedelta = (baserev, diff)
1906 cachedelta = (baserev, diff)
1899 elif source == b'storage':
1907 elif source == b'storage':
1900 baserev = orig.deltaparent(rev)
1908 baserev = orig.deltaparent(rev)
1901 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1909 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1902
1910
1903 return ((text, tr, linkrev, p1, p2),
1911 return ((text, tr, linkrev, p1, p2),
1904 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1912 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1905
1913
1906 @contextlib.contextmanager
1914 @contextlib.contextmanager
1907 def _temprevlog(ui, orig, truncaterev):
1915 def _temprevlog(ui, orig, truncaterev):
1908 from mercurial import vfs as vfsmod
1916 from mercurial import vfs as vfsmod
1909
1917
1910 if orig._inline:
1918 if orig._inline:
1911 raise error.Abort('not supporting inline revlog (yet)')
1919 raise error.Abort('not supporting inline revlog (yet)')
1912
1920
1913 origindexpath = orig.opener.join(orig.indexfile)
1921 origindexpath = orig.opener.join(orig.indexfile)
1914 origdatapath = orig.opener.join(orig.datafile)
1922 origdatapath = orig.opener.join(orig.datafile)
1915 indexname = 'revlog.i'
1923 indexname = 'revlog.i'
1916 dataname = 'revlog.d'
1924 dataname = 'revlog.d'
1917
1925
1918 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1926 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1919 try:
1927 try:
1920 # copy the data file in a temporary directory
1928 # copy the data file in a temporary directory
1921 ui.debug('copying data in %s\n' % tmpdir)
1929 ui.debug('copying data in %s\n' % tmpdir)
1922 destindexpath = os.path.join(tmpdir, 'revlog.i')
1930 destindexpath = os.path.join(tmpdir, 'revlog.i')
1923 destdatapath = os.path.join(tmpdir, 'revlog.d')
1931 destdatapath = os.path.join(tmpdir, 'revlog.d')
1924 shutil.copyfile(origindexpath, destindexpath)
1932 shutil.copyfile(origindexpath, destindexpath)
1925 shutil.copyfile(origdatapath, destdatapath)
1933 shutil.copyfile(origdatapath, destdatapath)
1926
1934
1927 # remove the data we want to add again
1935 # remove the data we want to add again
1928 ui.debug('truncating data to be rewritten\n')
1936 ui.debug('truncating data to be rewritten\n')
1929 with open(destindexpath, 'ab') as index:
1937 with open(destindexpath, 'ab') as index:
1930 index.seek(0)
1938 index.seek(0)
1931 index.truncate(truncaterev * orig._io.size)
1939 index.truncate(truncaterev * orig._io.size)
1932 with open(destdatapath, 'ab') as data:
1940 with open(destdatapath, 'ab') as data:
1933 data.seek(0)
1941 data.seek(0)
1934 data.truncate(orig.start(truncaterev))
1942 data.truncate(orig.start(truncaterev))
1935
1943
1936 # instantiate a new revlog from the temporary copy
1944 # instantiate a new revlog from the temporary copy
1937 ui.debug('instantiating revlog from the temporary copy\n')
1945 ui.debug('instantiating revlog from the temporary copy\n')
1938 vfs = vfsmod.vfs(tmpdir)
1946 vfs = vfsmod.vfs(tmpdir)
1939 vfs.options = getattr(orig.opener, 'options', None)
1947 vfs.options = getattr(orig.opener, 'options', None)
1940
1948
1941 dest = revlog.revlog(vfs,
1949 dest = revlog.revlog(vfs,
1942 indexfile=indexname,
1950 indexfile=indexname,
1943 datafile=dataname)
1951 datafile=dataname)
1944 if dest._inline:
1952 if dest._inline:
1945 raise error.Abort('not supporting inline revlog (yet)')
1953 raise error.Abort('not supporting inline revlog (yet)')
1946 # make sure internals are initialized
1954 # make sure internals are initialized
1947 dest.revision(len(dest) - 1)
1955 dest.revision(len(dest) - 1)
1948 yield dest
1956 yield dest
1949 del dest, vfs
1957 del dest, vfs
1950 finally:
1958 finally:
1951 shutil.rmtree(tmpdir, True)
1959 shutil.rmtree(tmpdir, True)
1952
1960
1953 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1961 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1954 [(b'e', b'engines', b'', b'compression engines to use'),
1962 [(b'e', b'engines', b'', b'compression engines to use'),
1955 (b's', b'startrev', 0, b'revision to start at')],
1963 (b's', b'startrev', 0, b'revision to start at')],
1956 b'-c|-m|FILE')
1964 b'-c|-m|FILE')
1957 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1965 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1958 """Benchmark operations on revlog chunks.
1966 """Benchmark operations on revlog chunks.
1959
1967
1960 Logically, each revlog is a collection of fulltext revisions. However,
1968 Logically, each revlog is a collection of fulltext revisions. However,
1961 stored within each revlog are "chunks" of possibly compressed data. This
1969 stored within each revlog are "chunks" of possibly compressed data. This
1962 data needs to be read and decompressed or compressed and written.
1970 data needs to be read and decompressed or compressed and written.
1963
1971
1964 This command measures the time it takes to read+decompress and recompress
1972 This command measures the time it takes to read+decompress and recompress
1965 chunks in a revlog. It effectively isolates I/O and compression performance.
1973 chunks in a revlog. It effectively isolates I/O and compression performance.
1966 For measurements of higher-level operations like resolving revisions,
1974 For measurements of higher-level operations like resolving revisions,
1967 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1975 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1968 """
1976 """
1969 opts = _byteskwargs(opts)
1977 opts = _byteskwargs(opts)
1970
1978
1971 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1979 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1972
1980
1973 # _chunkraw was renamed to _getsegmentforrevs.
1981 # _chunkraw was renamed to _getsegmentforrevs.
1974 try:
1982 try:
1975 segmentforrevs = rl._getsegmentforrevs
1983 segmentforrevs = rl._getsegmentforrevs
1976 except AttributeError:
1984 except AttributeError:
1977 segmentforrevs = rl._chunkraw
1985 segmentforrevs = rl._chunkraw
1978
1986
1979 # Verify engines argument.
1987 # Verify engines argument.
1980 if engines:
1988 if engines:
1981 engines = set(e.strip() for e in engines.split(b','))
1989 engines = set(e.strip() for e in engines.split(b','))
1982 for engine in engines:
1990 for engine in engines:
1983 try:
1991 try:
1984 util.compressionengines[engine]
1992 util.compressionengines[engine]
1985 except KeyError:
1993 except KeyError:
1986 raise error.Abort(b'unknown compression engine: %s' % engine)
1994 raise error.Abort(b'unknown compression engine: %s' % engine)
1987 else:
1995 else:
1988 engines = []
1996 engines = []
1989 for e in util.compengines:
1997 for e in util.compengines:
1990 engine = util.compengines[e]
1998 engine = util.compengines[e]
1991 try:
1999 try:
1992 if engine.available():
2000 if engine.available():
1993 engine.revlogcompressor().compress(b'dummy')
2001 engine.revlogcompressor().compress(b'dummy')
1994 engines.append(e)
2002 engines.append(e)
1995 except NotImplementedError:
2003 except NotImplementedError:
1996 pass
2004 pass
1997
2005
1998 revs = list(rl.revs(startrev, len(rl) - 1))
2006 revs = list(rl.revs(startrev, len(rl) - 1))
1999
2007
2000 def rlfh(rl):
2008 def rlfh(rl):
2001 if rl._inline:
2009 if rl._inline:
2002 return getsvfs(repo)(rl.indexfile)
2010 return getsvfs(repo)(rl.indexfile)
2003 else:
2011 else:
2004 return getsvfs(repo)(rl.datafile)
2012 return getsvfs(repo)(rl.datafile)
2005
2013
2006 def doread():
2014 def doread():
2007 rl.clearcaches()
2015 rl.clearcaches()
2008 for rev in revs:
2016 for rev in revs:
2009 segmentforrevs(rev, rev)
2017 segmentforrevs(rev, rev)
2010
2018
2011 def doreadcachedfh():
2019 def doreadcachedfh():
2012 rl.clearcaches()
2020 rl.clearcaches()
2013 fh = rlfh(rl)
2021 fh = rlfh(rl)
2014 for rev in revs:
2022 for rev in revs:
2015 segmentforrevs(rev, rev, df=fh)
2023 segmentforrevs(rev, rev, df=fh)
2016
2024
2017 def doreadbatch():
2025 def doreadbatch():
2018 rl.clearcaches()
2026 rl.clearcaches()
2019 segmentforrevs(revs[0], revs[-1])
2027 segmentforrevs(revs[0], revs[-1])
2020
2028
2021 def doreadbatchcachedfh():
2029 def doreadbatchcachedfh():
2022 rl.clearcaches()
2030 rl.clearcaches()
2023 fh = rlfh(rl)
2031 fh = rlfh(rl)
2024 segmentforrevs(revs[0], revs[-1], df=fh)
2032 segmentforrevs(revs[0], revs[-1], df=fh)
2025
2033
2026 def dochunk():
2034 def dochunk():
2027 rl.clearcaches()
2035 rl.clearcaches()
2028 fh = rlfh(rl)
2036 fh = rlfh(rl)
2029 for rev in revs:
2037 for rev in revs:
2030 rl._chunk(rev, df=fh)
2038 rl._chunk(rev, df=fh)
2031
2039
2032 chunks = [None]
2040 chunks = [None]
2033
2041
2034 def dochunkbatch():
2042 def dochunkbatch():
2035 rl.clearcaches()
2043 rl.clearcaches()
2036 fh = rlfh(rl)
2044 fh = rlfh(rl)
2037 # Save chunks as a side-effect.
2045 # Save chunks as a side-effect.
2038 chunks[0] = rl._chunks(revs, df=fh)
2046 chunks[0] = rl._chunks(revs, df=fh)
2039
2047
2040 def docompress(compressor):
2048 def docompress(compressor):
2041 rl.clearcaches()
2049 rl.clearcaches()
2042
2050
2043 try:
2051 try:
2044 # Swap in the requested compression engine.
2052 # Swap in the requested compression engine.
2045 oldcompressor = rl._compressor
2053 oldcompressor = rl._compressor
2046 rl._compressor = compressor
2054 rl._compressor = compressor
2047 for chunk in chunks[0]:
2055 for chunk in chunks[0]:
2048 rl.compress(chunk)
2056 rl.compress(chunk)
2049 finally:
2057 finally:
2050 rl._compressor = oldcompressor
2058 rl._compressor = oldcompressor
2051
2059
2052 benches = [
2060 benches = [
2053 (lambda: doread(), b'read'),
2061 (lambda: doread(), b'read'),
2054 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2062 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2055 (lambda: doreadbatch(), b'read batch'),
2063 (lambda: doreadbatch(), b'read batch'),
2056 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2064 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2057 (lambda: dochunk(), b'chunk'),
2065 (lambda: dochunk(), b'chunk'),
2058 (lambda: dochunkbatch(), b'chunk batch'),
2066 (lambda: dochunkbatch(), b'chunk batch'),
2059 ]
2067 ]
2060
2068
2061 for engine in sorted(engines):
2069 for engine in sorted(engines):
2062 compressor = util.compengines[engine].revlogcompressor()
2070 compressor = util.compengines[engine].revlogcompressor()
2063 benches.append((functools.partial(docompress, compressor),
2071 benches.append((functools.partial(docompress, compressor),
2064 b'compress w/ %s' % engine))
2072 b'compress w/ %s' % engine))
2065
2073
2066 for fn, title in benches:
2074 for fn, title in benches:
2067 timer, fm = gettimer(ui, opts)
2075 timer, fm = gettimer(ui, opts)
2068 timer(fn, title=title)
2076 timer(fn, title=title)
2069 fm.end()
2077 fm.end()
2070
2078
2071 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2079 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2072 [(b'', b'cache', False, b'use caches instead of clearing')],
2080 [(b'', b'cache', False, b'use caches instead of clearing')],
2073 b'-c|-m|FILE REV')
2081 b'-c|-m|FILE REV')
2074 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2082 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2075 """Benchmark obtaining a revlog revision.
2083 """Benchmark obtaining a revlog revision.
2076
2084
2077 Obtaining a revlog revision consists of roughly the following steps:
2085 Obtaining a revlog revision consists of roughly the following steps:
2078
2086
2079 1. Compute the delta chain
2087 1. Compute the delta chain
2080 2. Slice the delta chain if applicable
2088 2. Slice the delta chain if applicable
2081 3. Obtain the raw chunks for that delta chain
2089 3. Obtain the raw chunks for that delta chain
2082 4. Decompress each raw chunk
2090 4. Decompress each raw chunk
2083 5. Apply binary patches to obtain fulltext
2091 5. Apply binary patches to obtain fulltext
2084 6. Verify hash of fulltext
2092 6. Verify hash of fulltext
2085
2093
2086 This command measures the time spent in each of these phases.
2094 This command measures the time spent in each of these phases.
2087 """
2095 """
2088 opts = _byteskwargs(opts)
2096 opts = _byteskwargs(opts)
2089
2097
2090 if opts.get(b'changelog') or opts.get(b'manifest'):
2098 if opts.get(b'changelog') or opts.get(b'manifest'):
2091 file_, rev = None, file_
2099 file_, rev = None, file_
2092 elif rev is None:
2100 elif rev is None:
2093 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2101 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2094
2102
2095 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2103 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2096
2104
2097 # _chunkraw was renamed to _getsegmentforrevs.
2105 # _chunkraw was renamed to _getsegmentforrevs.
2098 try:
2106 try:
2099 segmentforrevs = r._getsegmentforrevs
2107 segmentforrevs = r._getsegmentforrevs
2100 except AttributeError:
2108 except AttributeError:
2101 segmentforrevs = r._chunkraw
2109 segmentforrevs = r._chunkraw
2102
2110
2103 node = r.lookup(rev)
2111 node = r.lookup(rev)
2104 rev = r.rev(node)
2112 rev = r.rev(node)
2105
2113
2106 def getrawchunks(data, chain):
2114 def getrawchunks(data, chain):
2107 start = r.start
2115 start = r.start
2108 length = r.length
2116 length = r.length
2109 inline = r._inline
2117 inline = r._inline
2110 iosize = r._io.size
2118 iosize = r._io.size
2111 buffer = util.buffer
2119 buffer = util.buffer
2112
2120
2113 chunks = []
2121 chunks = []
2114 ladd = chunks.append
2122 ladd = chunks.append
2115 for idx, item in enumerate(chain):
2123 for idx, item in enumerate(chain):
2116 offset = start(item[0])
2124 offset = start(item[0])
2117 bits = data[idx]
2125 bits = data[idx]
2118 for rev in item:
2126 for rev in item:
2119 chunkstart = start(rev)
2127 chunkstart = start(rev)
2120 if inline:
2128 if inline:
2121 chunkstart += (rev + 1) * iosize
2129 chunkstart += (rev + 1) * iosize
2122 chunklength = length(rev)
2130 chunklength = length(rev)
2123 ladd(buffer(bits, chunkstart - offset, chunklength))
2131 ladd(buffer(bits, chunkstart - offset, chunklength))
2124
2132
2125 return chunks
2133 return chunks
2126
2134
2127 def dodeltachain(rev):
2135 def dodeltachain(rev):
2128 if not cache:
2136 if not cache:
2129 r.clearcaches()
2137 r.clearcaches()
2130 r._deltachain(rev)
2138 r._deltachain(rev)
2131
2139
2132 def doread(chain):
2140 def doread(chain):
2133 if not cache:
2141 if not cache:
2134 r.clearcaches()
2142 r.clearcaches()
2135 for item in slicedchain:
2143 for item in slicedchain:
2136 segmentforrevs(item[0], item[-1])
2144 segmentforrevs(item[0], item[-1])
2137
2145
2138 def doslice(r, chain, size):
2146 def doslice(r, chain, size):
2139 for s in slicechunk(r, chain, targetsize=size):
2147 for s in slicechunk(r, chain, targetsize=size):
2140 pass
2148 pass
2141
2149
2142 def dorawchunks(data, chain):
2150 def dorawchunks(data, chain):
2143 if not cache:
2151 if not cache:
2144 r.clearcaches()
2152 r.clearcaches()
2145 getrawchunks(data, chain)
2153 getrawchunks(data, chain)
2146
2154
2147 def dodecompress(chunks):
2155 def dodecompress(chunks):
2148 decomp = r.decompress
2156 decomp = r.decompress
2149 for chunk in chunks:
2157 for chunk in chunks:
2150 decomp(chunk)
2158 decomp(chunk)
2151
2159
2152 def dopatch(text, bins):
2160 def dopatch(text, bins):
2153 if not cache:
2161 if not cache:
2154 r.clearcaches()
2162 r.clearcaches()
2155 mdiff.patches(text, bins)
2163 mdiff.patches(text, bins)
2156
2164
2157 def dohash(text):
2165 def dohash(text):
2158 if not cache:
2166 if not cache:
2159 r.clearcaches()
2167 r.clearcaches()
2160 r.checkhash(text, node, rev=rev)
2168 r.checkhash(text, node, rev=rev)
2161
2169
2162 def dorevision():
2170 def dorevision():
2163 if not cache:
2171 if not cache:
2164 r.clearcaches()
2172 r.clearcaches()
2165 r.revision(node)
2173 r.revision(node)
2166
2174
2167 try:
2175 try:
2168 from mercurial.revlogutils.deltas import slicechunk
2176 from mercurial.revlogutils.deltas import slicechunk
2169 except ImportError:
2177 except ImportError:
2170 slicechunk = getattr(revlog, '_slicechunk', None)
2178 slicechunk = getattr(revlog, '_slicechunk', None)
2171
2179
2172 size = r.length(rev)
2180 size = r.length(rev)
2173 chain = r._deltachain(rev)[0]
2181 chain = r._deltachain(rev)[0]
2174 if not getattr(r, '_withsparseread', False):
2182 if not getattr(r, '_withsparseread', False):
2175 slicedchain = (chain,)
2183 slicedchain = (chain,)
2176 else:
2184 else:
2177 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2185 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2178 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2186 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2179 rawchunks = getrawchunks(data, slicedchain)
2187 rawchunks = getrawchunks(data, slicedchain)
2180 bins = r._chunks(chain)
2188 bins = r._chunks(chain)
2181 text = bytes(bins[0])
2189 text = bytes(bins[0])
2182 bins = bins[1:]
2190 bins = bins[1:]
2183 text = mdiff.patches(text, bins)
2191 text = mdiff.patches(text, bins)
2184
2192
2185 benches = [
2193 benches = [
2186 (lambda: dorevision(), b'full'),
2194 (lambda: dorevision(), b'full'),
2187 (lambda: dodeltachain(rev), b'deltachain'),
2195 (lambda: dodeltachain(rev), b'deltachain'),
2188 (lambda: doread(chain), b'read'),
2196 (lambda: doread(chain), b'read'),
2189 ]
2197 ]
2190
2198
2191 if getattr(r, '_withsparseread', False):
2199 if getattr(r, '_withsparseread', False):
2192 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2200 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2193 benches.append(slicing)
2201 benches.append(slicing)
2194
2202
2195 benches.extend([
2203 benches.extend([
2196 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2204 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2197 (lambda: dodecompress(rawchunks), b'decompress'),
2205 (lambda: dodecompress(rawchunks), b'decompress'),
2198 (lambda: dopatch(text, bins), b'patch'),
2206 (lambda: dopatch(text, bins), b'patch'),
2199 (lambda: dohash(text), b'hash'),
2207 (lambda: dohash(text), b'hash'),
2200 ])
2208 ])
2201
2209
2202 timer, fm = gettimer(ui, opts)
2210 timer, fm = gettimer(ui, opts)
2203 for fn, title in benches:
2211 for fn, title in benches:
2204 timer(fn, title=title)
2212 timer(fn, title=title)
2205 fm.end()
2213 fm.end()
2206
2214
2207 @command(b'perfrevset',
2215 @command(b'perfrevset',
2208 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2216 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2209 (b'', b'contexts', False, b'obtain changectx for each revision')]
2217 (b'', b'contexts', False, b'obtain changectx for each revision')]
2210 + formatteropts, b"REVSET")
2218 + formatteropts, b"REVSET")
2211 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2219 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2212 """benchmark the execution time of a revset
2220 """benchmark the execution time of a revset
2213
2221
2214 Use the --clear option if you need to evaluate the impact of building the
2222 Use the --clear option if you need to evaluate the impact of building the
2215 volatile revision set caches on revset execution. Volatile caches hold
2223 volatile revision set caches on revset execution. Volatile caches hold
2216 filtering- and obsolescence-related data."""
2224 filtering- and obsolescence-related data."""
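# A hedged usage sketch; the revset expressions are only examples:
#
#   $ hg perfrevset 'draft()'
#   $ hg perfrevset --clear --contexts 'heads(all())'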
2217 opts = _byteskwargs(opts)
2225 opts = _byteskwargs(opts)
2218
2226
2219 timer, fm = gettimer(ui, opts)
2227 timer, fm = gettimer(ui, opts)
2220 def d():
2228 def d():
2221 if clear:
2229 if clear:
2222 repo.invalidatevolatilesets()
2230 repo.invalidatevolatilesets()
2223 if contexts:
2231 if contexts:
2224 for ctx in repo.set(expr): pass
2232 for ctx in repo.set(expr): pass
2225 else:
2233 else:
2226 for r in repo.revs(expr): pass
2234 for r in repo.revs(expr): pass
2227 timer(d)
2235 timer(d)
2228 fm.end()
2236 fm.end()
2229
2237
2230 @command(b'perfvolatilesets',
2238 @command(b'perfvolatilesets',
2231 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2239 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2232 ] + formatteropts)
2240 ] + formatteropts)
2233 def perfvolatilesets(ui, repo, *names, **opts):
2241 def perfvolatilesets(ui, repo, *names, **opts):
2234 """benchmark the computation of various volatile set
2242 """benchmark the computation of various volatile set
2235
2243
2236 Volatile set computes element related to filtering and obsolescence."""
2244 Volatile set computes element related to filtering and obsolescence."""
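# A hedged usage sketch; 'obsolete' is one volatile set name, the exact
# list of available names comes from obsolete.cachefuncs and depends on
# the Mercurial version:
#
#   $ hg perfvolatilesets
#   $ hg perfvolatilesets --clear-obsstore obsolete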
2237 opts = _byteskwargs(opts)
2245 opts = _byteskwargs(opts)
2238 timer, fm = gettimer(ui, opts)
2246 timer, fm = gettimer(ui, opts)
2239 repo = repo.unfiltered()
2247 repo = repo.unfiltered()
2240
2248
2241 def getobs(name):
2249 def getobs(name):
2242 def d():
2250 def d():
2243 repo.invalidatevolatilesets()
2251 repo.invalidatevolatilesets()
2244 if opts[b'clear_obsstore']:
2252 if opts[b'clear_obsstore']:
2245 clearfilecache(repo, b'obsstore')
2253 clearfilecache(repo, b'obsstore')
2246 obsolete.getrevs(repo, name)
2254 obsolete.getrevs(repo, name)
2247 return d
2255 return d
2248
2256
2249 allobs = sorted(obsolete.cachefuncs)
2257 allobs = sorted(obsolete.cachefuncs)
2250 if names:
2258 if names:
2251 allobs = [n for n in allobs if n in names]
2259 allobs = [n for n in allobs if n in names]
2252
2260
2253 for name in allobs:
2261 for name in allobs:
2254 timer(getobs(name), title=name)
2262 timer(getobs(name), title=name)
2255
2263
2256 def getfiltered(name):
2264 def getfiltered(name):
2257 def d():
2265 def d():
2258 repo.invalidatevolatilesets()
2266 repo.invalidatevolatilesets()
2259 if opts[b'clear_obsstore']:
2267 if opts[b'clear_obsstore']:
2260 clearfilecache(repo, b'obsstore')
2268 clearfilecache(repo, b'obsstore')
2261 repoview.filterrevs(repo, name)
2269 repoview.filterrevs(repo, name)
2262 return d
2270 return d
2263
2271
2264 allfilter = sorted(repoview.filtertable)
2272 allfilter = sorted(repoview.filtertable)
2265 if names:
2273 if names:
2266 allfilter = [n for n in allfilter if n in names]
2274 allfilter = [n for n in allfilter if n in names]
2267
2275
2268 for name in allfilter:
2276 for name in allfilter:
2269 timer(getfiltered(name), title=name)
2277 timer(getfiltered(name), title=name)
2270 fm.end()
2278 fm.end()
2271
2279
2272 @command(b'perfbranchmap',
2280 @command(b'perfbranchmap',
2273 [(b'f', b'full', False,
2281 [(b'f', b'full', False,
2274 b'Includes build time of subset'),
2282 b'Includes build time of subset'),
2275 (b'', b'clear-revbranch', False,
2283 (b'', b'clear-revbranch', False,
2276 b'purge the revbranch cache between computation'),
2284 b'purge the revbranch cache between computation'),
2277 ] + formatteropts)
2285 ] + formatteropts)
2278 def perfbranchmap(ui, repo, *filternames, **opts):
2286 def perfbranchmap(ui, repo, *filternames, **opts):
2279 """benchmark the update of a branchmap
2287 """benchmark the update of a branchmap
2280
2288
2281 This benchmarks the full repo.branchmap() call with read and write disabled
2289 This benchmarks the full repo.branchmap() call with read and write disabled
2282 """
2290 """
2283 opts = _byteskwargs(opts)
2291 opts = _byteskwargs(opts)
2284 full = opts.get(b"full", False)
2292 full = opts.get(b"full", False)
2285 clear_revbranch = opts.get(b"clear_revbranch", False)
2293 clear_revbranch = opts.get(b"clear_revbranch", False)
2286 timer, fm = gettimer(ui, opts)
2294 timer, fm = gettimer(ui, opts)
2287 def getbranchmap(filtername):
2295 def getbranchmap(filtername):
2288 """generate a benchmark function for the filtername"""
2296 """generate a benchmark function for the filtername"""
2289 if filtername is None:
2297 if filtername is None:
2290 view = repo
2298 view = repo
2291 else:
2299 else:
2292 view = repo.filtered(filtername)
2300 view = repo.filtered(filtername)
2293 def d():
2301 def d():
2294 if clear_revbranch:
2302 if clear_revbranch:
2295 repo.revbranchcache()._clear()
2303 repo.revbranchcache()._clear()
2296 if full:
2304 if full:
2297 view._branchcaches.clear()
2305 view._branchcaches.clear()
2298 else:
2306 else:
2299 view._branchcaches.pop(filtername, None)
2307 view._branchcaches.pop(filtername, None)
2300 view.branchmap()
2308 view.branchmap()
2301 return d
2309 return d
2302 # add filters from smaller subsets to bigger subsets
2310 # add filters from smaller subsets to bigger subsets
2303 possiblefilters = set(repoview.filtertable)
2311 possiblefilters = set(repoview.filtertable)
2304 if filternames:
2312 if filternames:
2305 possiblefilters &= set(filternames)
2313 possiblefilters &= set(filternames)
2306 subsettable = getbranchmapsubsettable()
2314 subsettable = getbranchmapsubsettable()
2307 allfilters = []
2315 allfilters = []
2308 while possiblefilters:
2316 while possiblefilters:
2309 for name in possiblefilters:
2317 for name in possiblefilters:
2310 subset = subsettable.get(name)
2318 subset = subsettable.get(name)
2311 if subset not in possiblefilters:
2319 if subset not in possiblefilters:
2312 break
2320 break
2313 else:
2321 else:
2314 assert False, b'subset cycle %s!' % possiblefilters
2322 assert False, b'subset cycle %s!' % possiblefilters
2315 allfilters.append(name)
2323 allfilters.append(name)
2316 possiblefilters.remove(name)
2324 possiblefilters.remove(name)
2317
2325
2318 # warm the cache
2326 # warm the cache
2319 if not full:
2327 if not full:
2320 for name in allfilters:
2328 for name in allfilters:
2321 repo.filtered(name).branchmap()
2329 repo.filtered(name).branchmap()
2322 if not filternames or b'unfiltered' in filternames:
2330 if not filternames or b'unfiltered' in filternames:
2323 # add unfiltered
2331 # add unfiltered
2324 allfilters.append(None)
2332 allfilters.append(None)
2325
2333
2326 branchcacheread = safeattrsetter(branchmap, b'read')
2334 branchcacheread = safeattrsetter(branchmap, b'read')
2327 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2335 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2328 branchcacheread.set(lambda repo: None)
2336 branchcacheread.set(lambda repo: None)
2329 branchcachewrite.set(lambda bc, repo: None)
2337 branchcachewrite.set(lambda bc, repo: None)
2330 try:
2338 try:
2331 for name in allfilters:
2339 for name in allfilters:
2332 printname = name
2340 printname = name
2333 if name is None:
2341 if name is None:
2334 printname = b'unfiltered'
2342 printname = b'unfiltered'
2335 timer(getbranchmap(name), title=str(printname))
2343 timer(getbranchmap(name), title=str(printname))
2336 finally:
2344 finally:
2337 branchcacheread.restore()
2345 branchcacheread.restore()
2338 branchcachewrite.restore()
2346 branchcachewrite.restore()
2339 fm.end()
2347 fm.end()
2340
2348
2341 @command(b'perfbranchmapupdate', [
2349 @command(b'perfbranchmapupdate', [
2342 (b'', b'base', [], b'subset of revision to start from'),
2350 (b'', b'base', [], b'subset of revision to start from'),
2343 (b'', b'target', [], b'subset of revision to end with'),
2351 (b'', b'target', [], b'subset of revision to end with'),
2344 (b'', b'clear-caches', False, b'clear caches between each run')
2352 (b'', b'clear-caches', False, b'clear caches between each run')
2345 ] + formatteropts)
2353 ] + formatteropts)
2346 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2354 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2347 """benchmark branchmap update from for <base> revs to <target> revs
2355 """benchmark branchmap update from for <base> revs to <target> revs
2348
2356
2349 If `--clear-caches` is passed, the following items will be reset before
2357 If `--clear-caches` is passed, the following items will be reset before
2350 each update:
2358 each update:
2351 * the changelog instance and associated indexes
2359 * the changelog instance and associated indexes
2352 * the rev-branch-cache instance
2360 * the rev-branch-cache instance
2353
2361
2354 Examples:
2362 Examples:
2355
2363
2356 # update for the one last revision
2364 # update for the one last revision
2357 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2365 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2358
2366
2359 # update for a change coming with a new branch
2367 # update for a change coming with a new branch
2360 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2368 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2361 """
2369 """
2362 from mercurial import branchmap
2370 from mercurial import branchmap
2363 from mercurial import repoview
2371 from mercurial import repoview
2364 opts = _byteskwargs(opts)
2372 opts = _byteskwargs(opts)
2365 timer, fm = gettimer(ui, opts)
2373 timer, fm = gettimer(ui, opts)
2366 clearcaches = opts[b'clear_caches']
2374 clearcaches = opts[b'clear_caches']
2367 unfi = repo.unfiltered()
2375 unfi = repo.unfiltered()
2368 x = [None] # used to pass data between closures
2376 x = [None] # used to pass data between closures
2369
2377
2370 # we use a `list` here to avoid possible side effects from smartset
2378 # we use a `list` here to avoid possible side effects from smartset
2371 baserevs = list(scmutil.revrange(repo, base))
2379 baserevs = list(scmutil.revrange(repo, base))
2372 targetrevs = list(scmutil.revrange(repo, target))
2380 targetrevs = list(scmutil.revrange(repo, target))
2373 if not baserevs:
2381 if not baserevs:
2374 raise error.Abort(b'no revisions selected for --base')
2382 raise error.Abort(b'no revisions selected for --base')
2375 if not targetrevs:
2383 if not targetrevs:
2376 raise error.Abort(b'no revisions selected for --target')
2384 raise error.Abort(b'no revisions selected for --target')
2377
2385
2378 # make sure the target branchmap also contains the one in the base
2386 # make sure the target branchmap also contains the one in the base
2379 targetrevs = list(set(baserevs) | set(targetrevs))
2387 targetrevs = list(set(baserevs) | set(targetrevs))
2380 targetrevs.sort()
2388 targetrevs.sort()
2381
2389
2382 cl = repo.changelog
2390 cl = repo.changelog
2383 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2391 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2384 allbaserevs.sort()
2392 allbaserevs.sort()
2385 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2393 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2386
2394
2387 newrevs = list(alltargetrevs.difference(allbaserevs))
2395 newrevs = list(alltargetrevs.difference(allbaserevs))
2388 newrevs.sort()
2396 newrevs.sort()
2389
2397
2390 allrevs = frozenset(unfi.changelog.revs())
2398 allrevs = frozenset(unfi.changelog.revs())
2391 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2399 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2392 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2400 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2393
2401
2394 def basefilter(repo, visibilityexceptions=None):
2402 def basefilter(repo, visibilityexceptions=None):
2395 return basefilterrevs
2403 return basefilterrevs
2396
2404
2397 def targetfilter(repo, visibilityexceptions=None):
2405 def targetfilter(repo, visibilityexceptions=None):
2398 return targetfilterrevs
2406 return targetfilterrevs
2399
2407
2400 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2408 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2401 ui.status(msg % (len(allbaserevs), len(newrevs)))
2409 ui.status(msg % (len(allbaserevs), len(newrevs)))
2402 if targetfilterrevs:
2410 if targetfilterrevs:
2403 msg = b'(%d revisions still filtered)\n'
2411 msg = b'(%d revisions still filtered)\n'
2404 ui.status(msg % len(targetfilterrevs))
2412 ui.status(msg % len(targetfilterrevs))
2405
2413
2406 try:
2414 try:
2407 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2415 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2408 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2416 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2409
2417
2410 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2418 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2411 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2419 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2412
2420
2413 # try to find an existing branchmap to reuse
2421 # try to find an existing branchmap to reuse
2414 subsettable = getbranchmapsubsettable()
2422 subsettable = getbranchmapsubsettable()
2415 candidatefilter = subsettable.get(None)
2423 candidatefilter = subsettable.get(None)
2416 while candidatefilter is not None:
2424 while candidatefilter is not None:
2417 candidatebm = repo.filtered(candidatefilter).branchmap()
2425 candidatebm = repo.filtered(candidatefilter).branchmap()
2418 if candidatebm.validfor(baserepo):
2426 if candidatebm.validfor(baserepo):
2419 filtered = repoview.filterrevs(repo, candidatefilter)
2427 filtered = repoview.filterrevs(repo, candidatefilter)
2420 missing = [r for r in allbaserevs if r in filtered]
2428 missing = [r for r in allbaserevs if r in filtered]
2421 base = candidatebm.copy()
2429 base = candidatebm.copy()
2422 base.update(baserepo, missing)
2430 base.update(baserepo, missing)
2423 break
2431 break
2424 candidatefilter = subsettable.get(candidatefilter)
2432 candidatefilter = subsettable.get(candidatefilter)
2425 else:
2433 else:
2426 # no suitable subset was found
2434 # no suitable subset was found
2427 base = branchmap.branchcache()
2435 base = branchmap.branchcache()
2428 base.update(baserepo, allbaserevs)
2436 base.update(baserepo, allbaserevs)
2429
2437
2430 def setup():
2438 def setup():
2431 x[0] = base.copy()
2439 x[0] = base.copy()
2432 if clearcaches:
2440 if clearcaches:
2433 unfi._revbranchcache = None
2441 unfi._revbranchcache = None
2434 clearchangelog(repo)
2442 clearchangelog(repo)
2435
2443
2436 def bench():
2444 def bench():
2437 x[0].update(targetrepo, newrevs)
2445 x[0].update(targetrepo, newrevs)
2438
2446
2439 timer(bench, setup=setup)
2447 timer(bench, setup=setup)
2440 fm.end()
2448 fm.end()
2441 finally:
2449 finally:
2442 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2450 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2443 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2451 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2444
2452
2445 @command(b'perfbranchmapload', [
2453 @command(b'perfbranchmapload', [
2446 (b'f', b'filter', b'', b'Specify repoview filter'),
2454 (b'f', b'filter', b'', b'Specify repoview filter'),
2447 (b'', b'list', False, b'List branchmap filter caches'),
2455 (b'', b'list', False, b'List branchmap filter caches'),
2448 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2456 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2449
2457
2450 ] + formatteropts)
2458 ] + formatteropts)
2451 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2459 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2452 """benchmark reading the branchmap"""
2460 """benchmark reading the branchmap"""
2453 opts = _byteskwargs(opts)
2461 opts = _byteskwargs(opts)
2454 clearrevlogs = opts[b'clear_revlogs']
2462 clearrevlogs = opts[b'clear_revlogs']
2455
2463
2456 if list:
2464 if list:
2457 for name, kind, st in repo.cachevfs.readdir(stat=True):
2465 for name, kind, st in repo.cachevfs.readdir(stat=True):
2458 if name.startswith(b'branch2'):
2466 if name.startswith(b'branch2'):
2459 filtername = name.partition(b'-')[2] or b'unfiltered'
2467 filtername = name.partition(b'-')[2] or b'unfiltered'
2460 ui.status(b'%s - %s\n'
2468 ui.status(b'%s - %s\n'
2461 % (filtername, util.bytecount(st.st_size)))
2469 % (filtername, util.bytecount(st.st_size)))
2462 return
2470 return
2463 if not filter:
2471 if not filter:
2464 filter = None
2472 filter = None
2465 subsettable = getbranchmapsubsettable()
2473 subsettable = getbranchmapsubsettable()
2466 if filter is None:
2474 if filter is None:
2467 repo = repo.unfiltered()
2475 repo = repo.unfiltered()
2468 else:
2476 else:
2469 repo = repoview.repoview(repo, filter)
2477 repo = repoview.repoview(repo, filter)
2470
2478
2471 repo.branchmap() # make sure we have a relevant, up to date branchmap
2479 repo.branchmap() # make sure we have a relevant, up to date branchmap
2472
2480
2473 currentfilter = filter
2481 currentfilter = filter
2474 # try once without timer, the filter may not be cached
2482 # try once without timer, the filter may not be cached
2475 while branchmap.read(repo) is None:
2483 while branchmap.read(repo) is None:
2476 currentfilter = subsettable.get(currentfilter)
2484 currentfilter = subsettable.get(currentfilter)
2477 if currentfilter is None:
2485 if currentfilter is None:
2478 raise error.Abort(b'No branchmap cached for %s repo'
2486 raise error.Abort(b'No branchmap cached for %s repo'
2479 % (filter or b'unfiltered'))
2487 % (filter or b'unfiltered'))
2480 repo = repo.filtered(currentfilter)
2488 repo = repo.filtered(currentfilter)
2481 timer, fm = gettimer(ui, opts)
2489 timer, fm = gettimer(ui, opts)
2482 def setup():
2490 def setup():
2483 if clearrevlogs:
2491 if clearrevlogs:
2484 clearchangelog(repo)
2492 clearchangelog(repo)
2485 def bench():
2493 def bench():
2486 branchmap.read(repo)
2494 branchmap.read(repo)
2487 timer(bench, setup=setup)
2495 timer(bench, setup=setup)
2488 fm.end()
2496 fm.end()
2489
2497
2490 @command(b'perfloadmarkers')
2498 @command(b'perfloadmarkers')
2491 def perfloadmarkers(ui, repo):
2499 def perfloadmarkers(ui, repo):
2492 """benchmark the time to parse the on-disk markers for a repo
2500 """benchmark the time to parse the on-disk markers for a repo
2493
2501
2494 Result is the number of markers in the repo."""
2502 Result is the number of markers in the repo."""
2495 timer, fm = gettimer(ui)
2503 timer, fm = gettimer(ui)
2496 svfs = getsvfs(repo)
2504 svfs = getsvfs(repo)
2497 timer(lambda: len(obsolete.obsstore(svfs)))
2505 timer(lambda: len(obsolete.obsstore(svfs)))
2498 fm.end()
2506 fm.end()
2499
2507
2500 @command(b'perflrucachedict', formatteropts +
2508 @command(b'perflrucachedict', formatteropts +
2501 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2509 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2502 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2510 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2503 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2511 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2504 (b'', b'size', 4, b'size of cache'),
2512 (b'', b'size', 4, b'size of cache'),
2505 (b'', b'gets', 10000, b'number of key lookups'),
2513 (b'', b'gets', 10000, b'number of key lookups'),
2506 (b'', b'sets', 10000, b'number of key sets'),
2514 (b'', b'sets', 10000, b'number of key sets'),
2507 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2515 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2508 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2516 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2509 norepo=True)
2517 norepo=True)
2510 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2518 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2511 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2519 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2512 opts = _byteskwargs(opts)
2520 opts = _byteskwargs(opts)
2513
2521
2514 def doinit():
2522 def doinit():
2515 for i in _xrange(10000):
2523 for i in _xrange(10000):
2516 util.lrucachedict(size)
2524 util.lrucachedict(size)
2517
2525
2518 costrange = list(range(mincost, maxcost + 1))
2526 costrange = list(range(mincost, maxcost + 1))
2519
2527
2520 values = []
2528 values = []
2521 for i in _xrange(size):
2529 for i in _xrange(size):
2522 values.append(random.randint(0, _maxint))
2530 values.append(random.randint(0, _maxint))
2523
2531
2524 # Get mode fills the cache and tests raw lookup performance with no
2532 # Get mode fills the cache and tests raw lookup performance with no
2525 # eviction.
2533 # eviction.
2526 getseq = []
2534 getseq = []
2527 for i in _xrange(gets):
2535 for i in _xrange(gets):
2528 getseq.append(random.choice(values))
2536 getseq.append(random.choice(values))
2529
2537
2530 def dogets():
2538 def dogets():
2531 d = util.lrucachedict(size)
2539 d = util.lrucachedict(size)
2532 for v in values:
2540 for v in values:
2533 d[v] = v
2541 d[v] = v
2534 for key in getseq:
2542 for key in getseq:
2535 value = d[key]
2543 value = d[key]
2536 value # silence pyflakes warning
2544 value # silence pyflakes warning
2537
2545
2538 def dogetscost():
2546 def dogetscost():
2539 d = util.lrucachedict(size, maxcost=costlimit)
2547 d = util.lrucachedict(size, maxcost=costlimit)
2540 for i, v in enumerate(values):
2548 for i, v in enumerate(values):
2541 d.insert(v, v, cost=costs[i])
2549 d.insert(v, v, cost=costs[i])
2542 for key in getseq:
2550 for key in getseq:
2543 try:
2551 try:
2544 value = d[key]
2552 value = d[key]
2545 value # silence pyflakes warning
2553 value # silence pyflakes warning
2546 except KeyError:
2554 except KeyError:
2547 pass
2555 pass
2548
2556
2549 # Set mode tests insertion speed with cache eviction.
2557 # Set mode tests insertion speed with cache eviction.
2550 setseq = []
2558 setseq = []
2551 costs = []
2559 costs = []
2552 for i in _xrange(sets):
2560 for i in _xrange(sets):
2553 setseq.append(random.randint(0, _maxint))
2561 setseq.append(random.randint(0, _maxint))
2554 costs.append(random.choice(costrange))
2562 costs.append(random.choice(costrange))
2555
2563
2556 def doinserts():
2564 def doinserts():
2557 d = util.lrucachedict(size)
2565 d = util.lrucachedict(size)
2558 for v in setseq:
2566 for v in setseq:
2559 d.insert(v, v)
2567 d.insert(v, v)
2560
2568
2561 def doinsertscost():
2569 def doinsertscost():
2562 d = util.lrucachedict(size, maxcost=costlimit)
2570 d = util.lrucachedict(size, maxcost=costlimit)
2563 for i, v in enumerate(setseq):
2571 for i, v in enumerate(setseq):
2564 d.insert(v, v, cost=costs[i])
2572 d.insert(v, v, cost=costs[i])
2565
2573
2566 def dosets():
2574 def dosets():
2567 d = util.lrucachedict(size)
2575 d = util.lrucachedict(size)
2568 for v in setseq:
2576 for v in setseq:
2569 d[v] = v
2577 d[v] = v
2570
2578
2571 # Mixed mode randomly performs gets and sets with eviction.
2579 # Mixed mode randomly performs gets and sets with eviction.
2572 mixedops = []
2580 mixedops = []
2573 for i in _xrange(mixed):
2581 for i in _xrange(mixed):
2574 r = random.randint(0, 100)
2582 r = random.randint(0, 100)
2575 if r < mixedgetfreq:
2583 if r < mixedgetfreq:
2576 op = 0
2584 op = 0
2577 else:
2585 else:
2578 op = 1
2586 op = 1
2579
2587
2580 mixedops.append((op,
2588 mixedops.append((op,
2581 random.randint(0, size * 2),
2589 random.randint(0, size * 2),
2582 random.choice(costrange)))
2590 random.choice(costrange)))
2583
2591
2584 def domixed():
2592 def domixed():
2585 d = util.lrucachedict(size)
2593 d = util.lrucachedict(size)
2586
2594
2587 for op, v, cost in mixedops:
2595 for op, v, cost in mixedops:
2588 if op == 0:
2596 if op == 0:
2589 try:
2597 try:
2590 d[v]
2598 d[v]
2591 except KeyError:
2599 except KeyError:
2592 pass
2600 pass
2593 else:
2601 else:
2594 d[v] = v
2602 d[v] = v
2595
2603
2596 def domixedcost():
2604 def domixedcost():
2597 d = util.lrucachedict(size, maxcost=costlimit)
2605 d = util.lrucachedict(size, maxcost=costlimit)
2598
2606
2599 for op, v, cost in mixedops:
2607 for op, v, cost in mixedops:
2600 if op == 0:
2608 if op == 0:
2601 try:
2609 try:
2602 d[v]
2610 d[v]
2603 except KeyError:
2611 except KeyError:
2604 pass
2612 pass
2605 else:
2613 else:
2606 d.insert(v, v, cost=cost)
2614 d.insert(v, v, cost=cost)
2607
2615
2608 benches = [
2616 benches = [
2609 (doinit, b'init'),
2617 (doinit, b'init'),
2610 ]
2618 ]
2611
2619
2612 if costlimit:
2620 if costlimit:
2613 benches.extend([
2621 benches.extend([
2614 (dogetscost, b'gets w/ cost limit'),
2622 (dogetscost, b'gets w/ cost limit'),
2615 (doinsertscost, b'inserts w/ cost limit'),
2623 (doinsertscost, b'inserts w/ cost limit'),
2616 (domixedcost, b'mixed w/ cost limit'),
2624 (domixedcost, b'mixed w/ cost limit'),
2617 ])
2625 ])
2618 else:
2626 else:
2619 benches.extend([
2627 benches.extend([
2620 (dogets, b'gets'),
2628 (dogets, b'gets'),
2621 (doinserts, b'inserts'),
2629 (doinserts, b'inserts'),
2622 (dosets, b'sets'),
2630 (dosets, b'sets'),
2623 (domixed, b'mixed')
2631 (domixed, b'mixed')
2624 ])
2632 ])
2625
2633
2626 for fn, title in benches:
2634 for fn, title in benches:
2627 timer, fm = gettimer(ui, opts)
2635 timer, fm = gettimer(ui, opts)
2628 timer(fn, title=title)
2636 timer(fn, title=title)
2629 fm.end()
2637 fm.end()
2630
2638
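# A hedged usage sketch for perflrucachedict above; all parameter values
# are illustrative and the command runs without a repository (norepo):
#
#   $ hg perflrucachedict --size 4 --gets 100000
#   $ hg perflrucachedict --size 1000 --costlimit 500 --mixed 100000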
2631 @command(b'perfwrite', formatteropts)
2639 @command(b'perfwrite', formatteropts)
2632 def perfwrite(ui, repo, **opts):
2640 def perfwrite(ui, repo, **opts):
2633 """microbenchmark ui.write
2641 """microbenchmark ui.write
2634 """
2642 """
2635 opts = _byteskwargs(opts)
2643 opts = _byteskwargs(opts)
2636
2644
2637 timer, fm = gettimer(ui, opts)
2645 timer, fm = gettimer(ui, opts)
2638 def write():
2646 def write():
2639 for i in range(100000):
2647 for i in range(100000):
2640 ui.write((b'Testing write performance\n'))
2648 ui.write((b'Testing write performance\n'))
2641 timer(write)
2649 timer(write)
2642 fm.end()
2650 fm.end()
2643
2651
2644 def uisetup(ui):
2652 def uisetup(ui):
2645 if (util.safehasattr(cmdutil, b'openrevlog') and
2653 if (util.safehasattr(cmdutil, b'openrevlog') and
2646 not util.safehasattr(commands, b'debugrevlogopts')):
2654 not util.safehasattr(commands, b'debugrevlogopts')):
2647 # for "historical portability":
2655 # for "historical portability":
2648 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2656 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2649 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2657 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2650 # openrevlog() should cause failure, because it has been
2658 # openrevlog() should cause failure, because it has been
2651 # available since 3.5 (or 49c583ca48c4).
2659 # available since 3.5 (or 49c583ca48c4).
2652 def openrevlog(orig, repo, cmd, file_, opts):
2660 def openrevlog(orig, repo, cmd, file_, opts):
2653 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2661 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2654 raise error.Abort(b"This version doesn't support --dir option",
2662 raise error.Abort(b"This version doesn't support --dir option",
2655 hint=b"use 3.5 or later")
2663 hint=b"use 3.5 or later")
2656 return orig(repo, cmd, file_, opts)
2664 return orig(repo, cmd, file_, opts)
2657 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2665 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2658
2666
2659 @command(b'perfprogress', formatteropts + [
2667 @command(b'perfprogress', formatteropts + [
2660 (b'', b'topic', b'topic', b'topic for progress messages'),
2668 (b'', b'topic', b'topic', b'topic for progress messages'),
2661 (b'c', b'total', 1000000, b'total value we are progressing to'),
2669 (b'c', b'total', 1000000, b'total value we are progressing to'),
2662 ], norepo=True)
2670 ], norepo=True)
2663 def perfprogress(ui, topic=None, total=None, **opts):
2671 def perfprogress(ui, topic=None, total=None, **opts):
2664 """printing of progress bars"""
2672 """printing of progress bars"""
2665 opts = _byteskwargs(opts)
2673 opts = _byteskwargs(opts)
2666
2674
2667 timer, fm = gettimer(ui, opts)
2675 timer, fm = gettimer(ui, opts)
2668
2676
2669 def doprogress():
2677 def doprogress():
2670 with ui.makeprogress(topic, total=total) as progress:
2678 with ui.makeprogress(topic, total=total) as progress:
2671 for i in pycompat.xrange(total):
2679 for i in pycompat.xrange(total):
2672 progress.increment()
2680 progress.increment()
2673
2681
2674 timer(doprogress)
2682 timer(doprogress)
2675 fm.end()
2683 fm.end()
@@ -1,300 +1,300
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perfstatusext=$CONTRIBDIR/perf.py
35 > perfstatusext=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help perfstatusext
41 $ hg help perfstatusext
42 perfstatusext extension - helper extension to measure performance
42 perfstatusext extension - helper extension to measure performance
43
43
44 list of commands:
44 list of commands:
45
45
46 perfaddremove
46 perfaddremove
47 (no help text available)
47 (no help text available)
48 perfancestors
48 perfancestors
49 (no help text available)
49 (no help text available)
50 perfancestorset
50 perfancestorset
51 (no help text available)
51 (no help text available)
52 perfannotate (no help text available)
52 perfannotate (no help text available)
53 perfbdiff benchmark a bdiff between revisions
53 perfbdiff benchmark a bdiff between revisions
54 perfbookmarks
54 perfbookmarks
55 benchmark parsing bookmarks from disk to memory
55 benchmark parsing bookmarks from disk to memory
56 perfbranchmap
56 perfbranchmap
57 benchmark the update of a branchmap
57 benchmark the update of a branchmap
58 perfbranchmapload
58 perfbranchmapload
59 benchmark reading the branchmap
59 benchmark reading the branchmap
60 perfbranchmapupdate
60 perfbranchmapupdate
61 benchmark branchmap update from <base> revs to <target>
61 benchmark branchmap update from <base> revs to <target>
62 revs
62 revs
63 perfbundleread
63 perfbundleread
64 Benchmark reading of bundle files.
64 Benchmark reading of bundle files.
65 perfcca (no help text available)
65 perfcca (no help text available)
66 perfchangegroupchangelog
66 perfchangegroupchangelog
67 Benchmark producing a changelog group for a changegroup.
67 Benchmark producing a changelog group for a changegroup.
68 perfchangeset
68 perfchangeset
69 (no help text available)
69 (no help text available)
70 perfctxfiles (no help text available)
70 perfctxfiles (no help text available)
71 perfdiffwd Profile diff of working directory changes
71 perfdiffwd Profile diff of working directory changes
72 perfdirfoldmap
72 perfdirfoldmap
73 (no help text available)
73 (no help text available)
74 perfdirs (no help text available)
74 perfdirs (no help text available)
75 perfdirstate (no help text available)
75 perfdirstate (no help text available)
76 perfdirstatedirs
76 perfdirstatedirs
77 (no help text available)
77 (no help text available)
78 perfdirstatefoldmap
78 perfdirstatefoldmap
79 (no help text available)
79 (no help text available)
80 perfdirstatewrite
80 perfdirstatewrite
81 (no help text available)
81 (no help text available)
82 perfdiscovery
82 perfdiscovery
83 benchmark discovery between local repo and the peer at given
83 benchmark discovery between local repo and the peer at given
84 path
84 path
85 perffncacheencode
85 perffncacheencode
86 (no help text available)
86 (no help text available)
87 perffncacheload
87 perffncacheload
88 (no help text available)
88 (no help text available)
89 perffncachewrite
89 perffncachewrite
90 (no help text available)
90 (no help text available)
91 perfheads benchmark the computation of a changelog heads
91 perfheads benchmark the computation of a changelog heads
92 perfhelper-pathcopies
92 perfhelper-pathcopies
93 find statistic about potential parameters for the
93 find statistic about potential parameters for the
94 'perftracecopies'
94 'perftracecopies'
95 perfignore benchmark operation related to computing ignore
95 perfignore benchmark operation related to computing ignore
96 perfindex (no help text available)
96 perfindex benchmark index creation time followed by a lookup
97 perflinelogedits
97 perflinelogedits
98 (no help text available)
98 (no help text available)
99 perfloadmarkers
99 perfloadmarkers
100 benchmark the time to parse the on-disk markers for a repo
100 benchmark the time to parse the on-disk markers for a repo
101 perflog (no help text available)
101 perflog (no help text available)
102 perflookup (no help text available)
102 perflookup (no help text available)
103 perflrucachedict
103 perflrucachedict
104 (no help text available)
104 (no help text available)
105 perfmanifest benchmark the time to read a manifest from disk and return a
105 perfmanifest benchmark the time to read a manifest from disk and return a
106 usable
106 usable
107 perfmergecalculate
107 perfmergecalculate
108 (no help text available)
108 (no help text available)
109 perfmoonwalk benchmark walking the changelog backwards
109 perfmoonwalk benchmark walking the changelog backwards
110 perfnodelookup
110 perfnodelookup
111 (no help text available)
111 (no help text available)
112 perfparents (no help text available)
112 perfparents (no help text available)
113 perfpathcopies
113 perfpathcopies
114 benchmark the copy tracing logic
114 benchmark the copy tracing logic
115 perfphases benchmark phasesets computation
115 perfphases benchmark phasesets computation
116 perfphasesremote
116 perfphasesremote
117 benchmark time needed to analyse phases of the remote server
117 benchmark time needed to analyse phases of the remote server
118 perfprogress printing of progress bars
118 perfprogress printing of progress bars
119 perfrawfiles (no help text available)
119 perfrawfiles (no help text available)
120 perfrevlogchunks
120 perfrevlogchunks
121 Benchmark operations on revlog chunks.
121 Benchmark operations on revlog chunks.
122 perfrevlogindex
122 perfrevlogindex
123 Benchmark operations against a revlog index.
123 Benchmark operations against a revlog index.
124 perfrevlogrevision
124 perfrevlogrevision
125 Benchmark obtaining a revlog revision.
125 Benchmark obtaining a revlog revision.
126 perfrevlogrevisions
126 perfrevlogrevisions
127 Benchmark reading a series of revisions from a revlog.
127 Benchmark reading a series of revisions from a revlog.
128 perfrevlogwrite
128 perfrevlogwrite
129 Benchmark writing a series of revisions to a revlog.
129 Benchmark writing a series of revisions to a revlog.
130 perfrevrange (no help text available)
130 perfrevrange (no help text available)
131 perfrevset benchmark the execution time of a revset
131 perfrevset benchmark the execution time of a revset
132 perfstartup (no help text available)
132 perfstartup (no help text available)
133 perfstatus (no help text available)
133 perfstatus (no help text available)
134 perftags (no help text available)
134 perftags (no help text available)
135 perftemplating
135 perftemplating
136 test the rendering time of a given template
136 test the rendering time of a given template
137 perfunidiff benchmark a unified diff between revisions
137 perfunidiff benchmark a unified diff between revisions
138 perfvolatilesets
138 perfvolatilesets
139 benchmark the computation of various volatile set
139 benchmark the computation of various volatile set
140 perfwalk (no help text available)
140 perfwalk (no help text available)
141 perfwrite microbenchmark ui.write
141 perfwrite microbenchmark ui.write
142
142
143 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
143 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
144 $ hg perfaddremove
144 $ hg perfaddremove
145 $ hg perfancestors
145 $ hg perfancestors
146 $ hg perfancestorset 2
146 $ hg perfancestorset 2
147 $ hg perfannotate a
147 $ hg perfannotate a
148 $ hg perfbdiff -c 1
148 $ hg perfbdiff -c 1
149 $ hg perfbdiff --alldata 1
149 $ hg perfbdiff --alldata 1
150 $ hg perfunidiff -c 1
150 $ hg perfunidiff -c 1
151 $ hg perfunidiff --alldata 1
151 $ hg perfunidiff --alldata 1
152 $ hg perfbookmarks
152 $ hg perfbookmarks
153 $ hg perfbranchmap
153 $ hg perfbranchmap
154 $ hg perfbranchmapload
154 $ hg perfbranchmapload
155 $ hg perfbranchmapupdate --base "not tip" --target "tip"
155 $ hg perfbranchmapupdate --base "not tip" --target "tip"
156 benchmark of branchmap with 3 revisions with 1 new ones
156 benchmark of branchmap with 3 revisions with 1 new ones
157 $ hg perfcca
157 $ hg perfcca
158 $ hg perfchangegroupchangelog
158 $ hg perfchangegroupchangelog
159 $ hg perfchangegroupchangelog --cgversion 01
159 $ hg perfchangegroupchangelog --cgversion 01
160 $ hg perfchangeset 2
160 $ hg perfchangeset 2
161 $ hg perfctxfiles 2
161 $ hg perfctxfiles 2
162 $ hg perfdiffwd
162 $ hg perfdiffwd
163 $ hg perfdirfoldmap
163 $ hg perfdirfoldmap
164 $ hg perfdirs
164 $ hg perfdirs
165 $ hg perfdirstate
165 $ hg perfdirstate
166 $ hg perfdirstatedirs
166 $ hg perfdirstatedirs
167 $ hg perfdirstatefoldmap
167 $ hg perfdirstatefoldmap
168 $ hg perfdirstatewrite
168 $ hg perfdirstatewrite
169 #if repofncache
169 #if repofncache
170 $ hg perffncacheencode
170 $ hg perffncacheencode
171 $ hg perffncacheload
171 $ hg perffncacheload
172 $ hg debugrebuildfncache
172 $ hg debugrebuildfncache
173 fncache already up to date
173 fncache already up to date
174 $ hg perffncachewrite
174 $ hg perffncachewrite
175 $ hg debugrebuildfncache
175 $ hg debugrebuildfncache
176 fncache already up to date
176 fncache already up to date
177 #endif
177 #endif
178 $ hg perfheads
178 $ hg perfheads
179 $ hg perfignore
179 $ hg perfignore
180 $ hg perfindex
180 $ hg perfindex
181 $ hg perflinelogedits -n 1
181 $ hg perflinelogedits -n 1
182 $ hg perfloadmarkers
182 $ hg perfloadmarkers
183 $ hg perflog
183 $ hg perflog
184 $ hg perflookup 2
184 $ hg perflookup 2
185 $ hg perflrucache
185 $ hg perflrucache
186 $ hg perfmanifest 2
186 $ hg perfmanifest 2
187 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
187 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
188 $ hg perfmanifest -m 44fe2c8352bb
188 $ hg perfmanifest -m 44fe2c8352bb
189 abort: manifest revision must be integer or full node
189 abort: manifest revision must be integer or full node
190 [255]
190 [255]
191 $ hg perfmergecalculate -r 3
191 $ hg perfmergecalculate -r 3
192 $ hg perfmoonwalk
192 $ hg perfmoonwalk
193 $ hg perfnodelookup 2
193 $ hg perfnodelookup 2
194 $ hg perfpathcopies 1 2
194 $ hg perfpathcopies 1 2
195 $ hg perfprogress --total 1000
195 $ hg perfprogress --total 1000
196 $ hg perfrawfiles 2
196 $ hg perfrawfiles 2
197 $ hg perfrevlogindex -c
197 $ hg perfrevlogindex -c
198 #if reporevlogstore
198 #if reporevlogstore
199 $ hg perfrevlogrevisions .hg/store/data/a.i
199 $ hg perfrevlogrevisions .hg/store/data/a.i
200 #endif
200 #endif
201 $ hg perfrevlogrevision -m 0
201 $ hg perfrevlogrevision -m 0
202 $ hg perfrevlogchunks -c
202 $ hg perfrevlogchunks -c
203 $ hg perfrevrange
203 $ hg perfrevrange
204 $ hg perfrevset 'all()'
204 $ hg perfrevset 'all()'
205 $ hg perfstartup
205 $ hg perfstartup
206 $ hg perfstatus
206 $ hg perfstatus
207 $ hg perftags
207 $ hg perftags
208 $ hg perftemplating
208 $ hg perftemplating
209 $ hg perfvolatilesets
209 $ hg perfvolatilesets
210 $ hg perfwalk
210 $ hg perfwalk
211 $ hg perfparents
211 $ hg perfparents
212 $ hg perfdiscovery -q .
212 $ hg perfdiscovery -q .
213
213
214 test actual output
214 test actual output
215 ------------------
215 ------------------
216
216
217 normal output:
217 normal output:
218
218
219 $ hg perfheads --config perf.stub=no
219 $ hg perfheads --config perf.stub=no
220 ! wall * comb * user * sys * (best of *) (glob)
220 ! wall * comb * user * sys * (best of *) (glob)
221
221
222 detailed output:
222 detailed output:
223
223
224 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
224 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
225 ! wall * comb * user * sys * (best of *) (glob)
225 ! wall * comb * user * sys * (best of *) (glob)
226 ! wall * comb * user * sys * (max of *) (glob)
226 ! wall * comb * user * sys * (max of *) (glob)
227 ! wall * comb * user * sys * (avg of *) (glob)
227 ! wall * comb * user * sys * (avg of *) (glob)
228 ! wall * comb * user * sys * (median of *) (glob)
228 ! wall * comb * user * sys * (median of *) (glob)
229
229
230 test json output
230 test json output
231 ----------------
231 ----------------
232
232
233 normal output:
233 normal output:
234
234
235 $ hg perfheads --template json --config perf.stub=no
235 $ hg perfheads --template json --config perf.stub=no
236 [
236 [
237 {
237 {
238 "comb": *, (glob)
238 "comb": *, (glob)
239 "count": *, (glob)
239 "count": *, (glob)
240 "sys": *, (glob)
240 "sys": *, (glob)
241 "user": *, (glob)
241 "user": *, (glob)
242 "wall": * (glob)
242 "wall": * (glob)
243 }
243 }
244 ]
244 ]
245
245
246 detailed output:
246 detailed output:
247
247
248 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
248 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
249 [
249 [
250 {
250 {
251 "avg.comb": *, (glob)
251 "avg.comb": *, (glob)
252 "avg.count": *, (glob)
252 "avg.count": *, (glob)
253 "avg.sys": *, (glob)
253 "avg.sys": *, (glob)
254 "avg.user": *, (glob)
254 "avg.user": *, (glob)
255 "avg.wall": *, (glob)
255 "avg.wall": *, (glob)
256 "comb": *, (glob)
256 "comb": *, (glob)
257 "count": *, (glob)
257 "count": *, (glob)
258 "max.comb": *, (glob)
258 "max.comb": *, (glob)
259 "max.count": *, (glob)
259 "max.count": *, (glob)
260 "max.sys": *, (glob)
260 "max.sys": *, (glob)
261 "max.user": *, (glob)
261 "max.user": *, (glob)
262 "max.wall": *, (glob)
262 "max.wall": *, (glob)
263 "median.comb": *, (glob)
263 "median.comb": *, (glob)
264 "median.count": *, (glob)
264 "median.count": *, (glob)
265 "median.sys": *, (glob)
265 "median.sys": *, (glob)
266 "median.user": *, (glob)
266 "median.user": *, (glob)
267 "median.wall": *, (glob)
267 "median.wall": *, (glob)
268 "sys": *, (glob)
268 "sys": *, (glob)
269 "user": *, (glob)
269 "user": *, (glob)
270 "wall": * (glob)
270 "wall": * (glob)
271 }
271 }
272 ]
272 ]
273
273
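The JSON template output checked above is convenient for scripting. The snippet below is a small, hypothetical harness (not part of the test) that runs one perf command with '--template json' and extracts the wall time; the command name, config override, and field names all mirror the transcript above.

import json
import subprocess

def perf_wall_time(command='perfheads', repo='.'):
    # Run one perf command with JSON output enabled, as in the test above.
    out = subprocess.check_output(
        ['hg', '-R', repo, command,
         '--template', 'json', '--config', 'perf.stub=no'])
    results = json.loads(out)   # a list with one object per measured benchmark
    return results[0]['wall']   # wall-clock seconds, same field globbed above

if __name__ == '__main__':
    print(perf_wall_time())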
274 Check perf.py for historical portability
275 ----------------------------------------
276
277 $ cd "$TESTDIR/.."
278
279 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
280 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
281 > "$TESTDIR"/check-perf-code.py contrib/perf.py
282 contrib/perf.py:\d+: (re)
283 > from mercurial import (
284 import newer module separately in try clause for early Mercurial
285 contrib/perf.py:\d+: (re)
286 > from mercurial import (
287 import newer module separately in try clause for early Mercurial
288 contrib/perf.py:\d+: (re)
289 > origindexpath = orig.opener.join(orig.indexfile)
290 use getvfs()/getsvfs() for early Mercurial
291 contrib/perf.py:\d+: (re)
292 > origdatapath = orig.opener.join(orig.datafile)
293 use getvfs()/getsvfs() for early Mercurial
294 contrib/perf.py:\d+: (re)
295 > vfs = vfsmod.vfs(tmpdir)
296 use getvfs()/getsvfs() for early Mercurial
297 contrib/perf.py:\d+: (re)
298 > vfs.options = getattr(orig.opener, 'options', None)
299 use getvfs()/getsvfs() for early Mercurial
300 [1]
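The checker output above flags direct opener/vfsmod usage because very old Mercurial releases predate the vfs API. The helpers it asks for, getvfs() and getsvfs(), live in contrib/perf.py; the sketch below only illustrates the fallback pattern those messages refer to and is not a copy of the real helpers.

def getvfs(repo):
    """Prefer the modern vfs attribute, fall back to the legacy opener."""
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs is not None else repo.opener

def getsvfs(repo):
    """Same fallback for the store: svfs on recent Mercurial, sopener before."""
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs is not None else repo.sopener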