perf: support looking up multiple revisions...
Boris Feld
r41484:7eb7637e default
@@ -1,2687 +1,2701 @@
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
except ImportError:
    pass


def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        (b'c', b'changelog', False, (b'open changelog')),
        (b'm', b'manifest', False, (b'open manifest')),
        (b'', b'dir', False, (b'open directory manifest')),
        ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm

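# Typical call pattern in the perf* commands further below (shown for
# illustration only):
#   timer, fm = gettimer(ui, opts)
#   timer(d, setup=s)   # d is the benchmarked closure, s an optional setup step
#   fm.end()
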
def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))

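# _timer() keeps sampling func() until more than 3 seconds have elapsed and at
# least 100 runs were collected, or more than 10 seconds have elapsed and at
# least 3 runs were collected; each sample appended by timeone() is a
# (wall clock, user CPU, system CPU) triple.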
def _timer(fm, func, setup=None, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)

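# For illustration, a single benchmark rendered by formatone() above typically
# looks like (numbers are made up):
#   ! wall 0.001234 comb 0.010000 user 0.010000 sys 0.000000 (best of 812)
# and with perf.all-timing enabled, additional max/avg/median lines follow.
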
# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()

# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')

# perf commands

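# The commands below are run as e.g. `hg perfwalk` or `hg perfheads` once this
# file is enabled as an extension, for instance (illustrative path):
#   [extensions]
#   perf = /path/to/mercurial/contrib/perf.py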
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

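# clearcaches() above is used from setup() callbacks so that every timed run
# starts with cold changelog lookup caches (see perfheads below).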
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()

@command(b'perftags', formatteropts+
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()

@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

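# Illustrative invocation of the command above (the bundle path is only an
# example):
#   hg perfbundleread /path/to/some.hg
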
@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()

@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
900 from mercurial.node import (
900 from mercurial.node import (
901 bin,
901 bin,
902 )
902 )
903 from mercurial import (
903 from mercurial import (
904 exchange,
904 exchange,
905 hg,
905 hg,
906 phases,
906 phases,
907 )
907 )
908 opts = _byteskwargs(opts)
908 opts = _byteskwargs(opts)
909 timer, fm = gettimer(ui, opts)
909 timer, fm = gettimer(ui, opts)
910
910
911 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
911 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
912 if not path:
912 if not path:
913 raise error.Abort((b'default repository not configured!'),
913 raise error.Abort((b'default repository not configured!'),
914 hint=(b"see 'hg help config.paths'"))
914 hint=(b"see 'hg help config.paths'"))
915 dest = path.pushloc or path.loc
915 dest = path.pushloc or path.loc
916 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
916 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
917 other = hg.peer(repo, opts, dest)
917 other = hg.peer(repo, opts, dest)
918
918
919 # easier to perform discovery through the operation
919 # easier to perform discovery through the operation
920 op = exchange.pushoperation(repo, other)
920 op = exchange.pushoperation(repo, other)
921 exchange._pushdiscoverychangeset(op)
921 exchange._pushdiscoverychangeset(op)
922
922
923 remotesubset = op.fallbackheads
923 remotesubset = op.fallbackheads
924
924
925 with other.commandexecutor() as e:
925 with other.commandexecutor() as e:
926 remotephases = e.callcommand(b'listkeys',
926 remotephases = e.callcommand(b'listkeys',
927 {b'namespace': b'phases'}).result()
927 {b'namespace': b'phases'}).result()
928 del other
928 del other
929 publishing = remotephases.get(b'publishing', False)
929 publishing = remotephases.get(b'publishing', False)
930 if publishing:
930 if publishing:
931 ui.status((b'publishing: yes\n'))
931 ui.status((b'publishing: yes\n'))
932 else:
932 else:
933 ui.status((b'publishing: no\n'))
933 ui.status((b'publishing: no\n'))
934
934
935 nodemap = repo.changelog.nodemap
935 nodemap = repo.changelog.nodemap
936 nonpublishroots = 0
936 nonpublishroots = 0
937 for nhex, phase in remotephases.iteritems():
937 for nhex, phase in remotephases.iteritems():
938 if nhex == b'publishing': # ignore data related to publish option
938 if nhex == b'publishing': # ignore data related to publish option
939 continue
939 continue
940 node = bin(nhex)
940 node = bin(nhex)
941 if node in nodemap and int(phase):
941 if node in nodemap and int(phase):
942 nonpublishroots += 1
942 nonpublishroots += 1
943 ui.status((b'number of roots: %d\n') % len(remotephases))
943 ui.status((b'number of roots: %d\n') % len(remotephases))
944 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
944 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
945 def d():
945 def d():
946 phases.remotephasessummary(repo,
946 phases.remotephasessummary(repo,
947 remotesubset,
947 remotesubset,
948 remotephases)
948 remotephases)
949 timer(d)
949 timer(d)
950 fm.end()
950 fm.end()
951
951
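# Illustrative invocations of perfphasesremote (not part of the original file;
# "my-server" is a hypothetical configured path name):
#
#   $ hg perfphasesremote              # use the default(-push) path
#   $ hg perfphasesremote my-server    # use an explicit peer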
952 @command(b'perfmanifest',[
952 @command(b'perfmanifest',[
953 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
953 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
954 (b'', b'clear-disk', False, b'clear on-disk caches too'),
954 (b'', b'clear-disk', False, b'clear on-disk caches too'),
955 ] + formatteropts, b'REV|NODE')
955 ] + formatteropts, b'REV|NODE')
956 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
956 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
957 """benchmark the time to read a manifest from disk and return a usable
957 """benchmark the time to read a manifest from disk and return a usable
958 dict-like object
958 dict-like object
959
959
960 Manifest caches are cleared before retrieval."""
960 Manifest caches are cleared before retrieval."""
961 opts = _byteskwargs(opts)
961 opts = _byteskwargs(opts)
962 timer, fm = gettimer(ui, opts)
962 timer, fm = gettimer(ui, opts)
963 if not manifest_rev:
963 if not manifest_rev:
964 ctx = scmutil.revsingle(repo, rev, rev)
964 ctx = scmutil.revsingle(repo, rev, rev)
965 t = ctx.manifestnode()
965 t = ctx.manifestnode()
966 else:
966 else:
967 from mercurial.node import bin
967 from mercurial.node import bin
968
968
969 if len(rev) == 40:
969 if len(rev) == 40:
970 t = bin(rev)
970 t = bin(rev)
971 else:
971 else:
972 try:
972 try:
973 rev = int(rev)
973 rev = int(rev)
974
974
975 if util.safehasattr(repo.manifestlog, b'getstorage'):
975 if util.safehasattr(repo.manifestlog, b'getstorage'):
976 t = repo.manifestlog.getstorage(b'').node(rev)
976 t = repo.manifestlog.getstorage(b'').node(rev)
977 else:
977 else:
978 t = repo.manifestlog._revlog.lookup(rev)
978 t = repo.manifestlog._revlog.lookup(rev)
979 except ValueError:
979 except ValueError:
980 raise error.Abort(b'manifest revision must be integer or full '
980 raise error.Abort(b'manifest revision must be integer or full '
981 b'node')
981 b'node')
982 def d():
982 def d():
983 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
983 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
984 repo.manifestlog[t].read()
984 repo.manifestlog[t].read()
985 timer(d)
985 timer(d)
986 fm.end()
986 fm.end()
987
987
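# Illustrative invocations of perfmanifest (not part of the original file;
# revisions are arbitrary examples):
#
#   $ hg perfmanifest tip
#   $ hg perfmanifest --clear-disk tip
#   $ hg perfmanifest -m 0     # look the argument up as a manifest revision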
988 @command(b'perfchangeset', formatteropts)
988 @command(b'perfchangeset', formatteropts)
989 def perfchangeset(ui, repo, rev, **opts):
989 def perfchangeset(ui, repo, rev, **opts):
990 opts = _byteskwargs(opts)
990 opts = _byteskwargs(opts)
991 timer, fm = gettimer(ui, opts)
991 timer, fm = gettimer(ui, opts)
992 n = scmutil.revsingle(repo, rev).node()
992 n = scmutil.revsingle(repo, rev).node()
993 def d():
993 def d():
994 repo.changelog.read(n)
994 repo.changelog.read(n)
995 #repo.changelog._cache = None
995 #repo.changelog._cache = None
996 timer(d)
996 timer(d)
997 fm.end()
997 fm.end()
998
998
999 @command(b'perfignore', formatteropts)
999 @command(b'perfignore', formatteropts)
1000 def perfignore(ui, repo, **opts):
1000 def perfignore(ui, repo, **opts):
1001 """benchmark operation related to computing ignore"""
1001 """benchmark operation related to computing ignore"""
1002 opts = _byteskwargs(opts)
1002 opts = _byteskwargs(opts)
1003 timer, fm = gettimer(ui, opts)
1003 timer, fm = gettimer(ui, opts)
1004 dirstate = repo.dirstate
1004 dirstate = repo.dirstate
1005
1005
1006 def setupone():
1006 def setupone():
1007 dirstate.invalidate()
1007 dirstate.invalidate()
1008 clearfilecache(dirstate, b'_ignore')
1008 clearfilecache(dirstate, b'_ignore')
1009
1009
1010 def runone():
1010 def runone():
1011 dirstate._ignore
1011 dirstate._ignore
1012
1012
1013 timer(runone, setup=setupone, title=b"load")
1013 timer(runone, setup=setupone, title=b"load")
1014 fm.end()
1014 fm.end()
1015
1015
1016 @command(b'perfindex', [
1016 @command(b'perfindex', [
1017 (b'', b'rev', b'', b'revision to be looked up (default tip)'),
1017 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1018 (b'', b'no-lookup', None, b'do not perform a revision lookup after creation'),
1018 (b'', b'no-lookup', None, b'do not perform a revision lookup after creation'),
1019 ] + formatteropts)
1019 ] + formatteropts)
1020 def perfindex(ui, repo, **opts):
1020 def perfindex(ui, repo, **opts):
1021 """benchmark index creation time followed by a lookup
1021 """benchmark index creation time followed by a lookup
1022
1022
1023 The default is to look `tip` up. Depending on the index implementation,
1023 The default is to look `tip` up. Depending on the index implementation,
1024 the revision looked up can matter. For example, an implementation
1024 the revision looked up can matter. For example, an implementation
1025 scanning the index will have a faster lookup time for `--rev tip` than for
1025 scanning the index will have a faster lookup time for `--rev tip` than for
1026 `--rev 0`.
1026 `--rev 0`. The number of revisions looked up and their order can also
1027 matter.
1028
1029 Examples of useful sets to test:
1030 * tip
1031 * 0
1032 * -10:
1033 * :10
1034 * -10: + :10
1035 * :10 + -10:
1036 * -10000:
1037 * -10000: + 0
1027
1038
1028 It is not currently possible to check for lookup of a missing node."""
1039 It is not currently possible to check for lookup of a missing node."""
1029 import mercurial.revlog
1040 import mercurial.revlog
1030 opts = _byteskwargs(opts)
1041 opts = _byteskwargs(opts)
1031 timer, fm = gettimer(ui, opts)
1042 timer, fm = gettimer(ui, opts)
1032 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1043 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1033 if opts[b'no_lookup']:
1044 if opts[b'no_lookup']:
1034 n = None
1045 if opts[b'rev']:
1035 elif opts[b'rev'] is None:
1046 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1036 n = repo[b"tip"].node()
1047 nodes = []
1048 elif not opts[b'rev']:
1049 nodes = [repo[b"tip"].node()]
1037 else:
1050 else:
1038 rev = scmutil.revsingle(repo, opts[b'rev'])
1051 revs = scmutil.revrange(repo, opts[b'rev'])
1039 n = repo[rev].node()
1052 cl = repo.changelog
1053 nodes = [cl.node(r) for r in revs]
1040
1054
1041 unfi = repo.unfiltered()
1055 unfi = repo.unfiltered()
1042 # find the filecache func directly
1056 # find the filecache func directly
1043 # This avoids polluting the benchmark with the filecache logic
1057 # This avoids polluting the benchmark with the filecache logic
1044 makecl = unfi.__class__.changelog.func
1058 makecl = unfi.__class__.changelog.func
1045 def setup():
1059 def setup():
1046 # probably not necessary, but for good measure
1060 # probably not necessary, but for good measure
1047 clearchangelog(unfi)
1061 clearchangelog(unfi)
1048 def d():
1062 def d():
1049 cl = makecl(unfi)
1063 cl = makecl(unfi)
1050 if n is not None:
1064 for n in nodes:
1051 cl.rev(n)
1065 cl.rev(n)
1052 timer(d, setup=setup)
1066 timer(d, setup=setup)
1053 fm.end()
1067 fm.end()
1054
1068
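# Illustrative invocations exercising the multi-revision lookup added above
# (not part of the patch itself; the revsets are taken from the docstring's
# suggested sets):
#
#   $ hg perfindex                           # default: look up tip only
#   $ hg perfindex --rev '-10000:' --rev 0   # many lookups, then rev 0
#   $ hg perfindex --no-lookup               # index creation time only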
1055 @command(b'perfstartup', formatteropts)
1069 @command(b'perfstartup', formatteropts)
1056 def perfstartup(ui, repo, **opts):
1070 def perfstartup(ui, repo, **opts):
1057 opts = _byteskwargs(opts)
1071 opts = _byteskwargs(opts)
1058 timer, fm = gettimer(ui, opts)
1072 timer, fm = gettimer(ui, opts)
1059 def d():
1073 def d():
1060 if os.name != r'nt':
1074 if os.name != r'nt':
1061 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1075 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1062 fsencode(sys.argv[0]))
1076 fsencode(sys.argv[0]))
1063 else:
1077 else:
1064 os.environ[r'HGRCPATH'] = r' '
1078 os.environ[r'HGRCPATH'] = r' '
1065 os.system(r"%s version -q > NUL" % sys.argv[0])
1079 os.system(r"%s version -q > NUL" % sys.argv[0])
1066 timer(d)
1080 timer(d)
1067 fm.end()
1081 fm.end()
1068
1082
1069 @command(b'perfparents', formatteropts)
1083 @command(b'perfparents', formatteropts)
1070 def perfparents(ui, repo, **opts):
1084 def perfparents(ui, repo, **opts):
1071 opts = _byteskwargs(opts)
1085 opts = _byteskwargs(opts)
1072 timer, fm = gettimer(ui, opts)
1086 timer, fm = gettimer(ui, opts)
1073 # control the number of commits perfparents iterates over
1087 # control the number of commits perfparents iterates over
1074 # experimental config: perf.parentscount
1088 # experimental config: perf.parentscount
1075 count = getint(ui, b"perf", b"parentscount", 1000)
1089 count = getint(ui, b"perf", b"parentscount", 1000)
1076 if len(repo.changelog) < count:
1090 if len(repo.changelog) < count:
1077 raise error.Abort(b"repo needs %d commits for this test" % count)
1091 raise error.Abort(b"repo needs %d commits for this test" % count)
1078 repo = repo.unfiltered()
1092 repo = repo.unfiltered()
1079 nl = [repo.changelog.node(i) for i in _xrange(count)]
1093 nl = [repo.changelog.node(i) for i in _xrange(count)]
1080 def d():
1094 def d():
1081 for n in nl:
1095 for n in nl:
1082 repo.changelog.parents(n)
1096 repo.changelog.parents(n)
1083 timer(d)
1097 timer(d)
1084 fm.end()
1098 fm.end()
1085
1099
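# Illustrative invocation of perfparents (not part of the original file); the
# iteration count is controlled by the experimental perf.parentscount config
# read above, and the repository must contain at least that many commits:
#
#   $ hg perfparents --config perf.parentscount=100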
1086 @command(b'perfctxfiles', formatteropts)
1100 @command(b'perfctxfiles', formatteropts)
1087 def perfctxfiles(ui, repo, x, **opts):
1101 def perfctxfiles(ui, repo, x, **opts):
1088 opts = _byteskwargs(opts)
1102 opts = _byteskwargs(opts)
1089 x = int(x)
1103 x = int(x)
1090 timer, fm = gettimer(ui, opts)
1104 timer, fm = gettimer(ui, opts)
1091 def d():
1105 def d():
1092 len(repo[x].files())
1106 len(repo[x].files())
1093 timer(d)
1107 timer(d)
1094 fm.end()
1108 fm.end()
1095
1109
1096 @command(b'perfrawfiles', formatteropts)
1110 @command(b'perfrawfiles', formatteropts)
1097 def perfrawfiles(ui, repo, x, **opts):
1111 def perfrawfiles(ui, repo, x, **opts):
1098 opts = _byteskwargs(opts)
1112 opts = _byteskwargs(opts)
1099 x = int(x)
1113 x = int(x)
1100 timer, fm = gettimer(ui, opts)
1114 timer, fm = gettimer(ui, opts)
1101 cl = repo.changelog
1115 cl = repo.changelog
1102 def d():
1116 def d():
1103 len(cl.read(x)[3])
1117 len(cl.read(x)[3])
1104 timer(d)
1118 timer(d)
1105 fm.end()
1119 fm.end()
1106
1120
1107 @command(b'perflookup', formatteropts)
1121 @command(b'perflookup', formatteropts)
1108 def perflookup(ui, repo, rev, **opts):
1122 def perflookup(ui, repo, rev, **opts):
1109 opts = _byteskwargs(opts)
1123 opts = _byteskwargs(opts)
1110 timer, fm = gettimer(ui, opts)
1124 timer, fm = gettimer(ui, opts)
1111 timer(lambda: len(repo.lookup(rev)))
1125 timer(lambda: len(repo.lookup(rev)))
1112 fm.end()
1126 fm.end()
1113
1127
1114 @command(b'perflinelogedits',
1128 @command(b'perflinelogedits',
1115 [(b'n', b'edits', 10000, b'number of edits'),
1129 [(b'n', b'edits', 10000, b'number of edits'),
1116 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1130 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1117 ], norepo=True)
1131 ], norepo=True)
1118 def perflinelogedits(ui, **opts):
1132 def perflinelogedits(ui, **opts):
1119 from mercurial import linelog
1133 from mercurial import linelog
1120
1134
1121 opts = _byteskwargs(opts)
1135 opts = _byteskwargs(opts)
1122
1136
1123 edits = opts[b'edits']
1137 edits = opts[b'edits']
1124 maxhunklines = opts[b'max_hunk_lines']
1138 maxhunklines = opts[b'max_hunk_lines']
1125
1139
1126 maxb1 = 100000
1140 maxb1 = 100000
1127 random.seed(0)
1141 random.seed(0)
1128 randint = random.randint
1142 randint = random.randint
1129 currentlines = 0
1143 currentlines = 0
1130 arglist = []
1144 arglist = []
1131 for rev in _xrange(edits):
1145 for rev in _xrange(edits):
1132 a1 = randint(0, currentlines)
1146 a1 = randint(0, currentlines)
1133 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1147 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1134 b1 = randint(0, maxb1)
1148 b1 = randint(0, maxb1)
1135 b2 = randint(b1, b1 + maxhunklines)
1149 b2 = randint(b1, b1 + maxhunklines)
1136 currentlines += (b2 - b1) - (a2 - a1)
1150 currentlines += (b2 - b1) - (a2 - a1)
1137 arglist.append((rev, a1, a2, b1, b2))
1151 arglist.append((rev, a1, a2, b1, b2))
1138
1152
1139 def d():
1153 def d():
1140 ll = linelog.linelog()
1154 ll = linelog.linelog()
1141 for args in arglist:
1155 for args in arglist:
1142 ll.replacelines(*args)
1156 ll.replacelines(*args)
1143
1157
1144 timer, fm = gettimer(ui, opts)
1158 timer, fm = gettimer(ui, opts)
1145 timer(d)
1159 timer(d)
1146 fm.end()
1160 fm.end()
1147
1161
1148 @command(b'perfrevrange', formatteropts)
1162 @command(b'perfrevrange', formatteropts)
1149 def perfrevrange(ui, repo, *specs, **opts):
1163 def perfrevrange(ui, repo, *specs, **opts):
1150 opts = _byteskwargs(opts)
1164 opts = _byteskwargs(opts)
1151 timer, fm = gettimer(ui, opts)
1165 timer, fm = gettimer(ui, opts)
1152 revrange = scmutil.revrange
1166 revrange = scmutil.revrange
1153 timer(lambda: len(revrange(repo, specs)))
1167 timer(lambda: len(revrange(repo, specs)))
1154 fm.end()
1168 fm.end()
1155
1169
1156 @command(b'perfnodelookup', formatteropts)
1170 @command(b'perfnodelookup', formatteropts)
1157 def perfnodelookup(ui, repo, rev, **opts):
1171 def perfnodelookup(ui, repo, rev, **opts):
1158 opts = _byteskwargs(opts)
1172 opts = _byteskwargs(opts)
1159 timer, fm = gettimer(ui, opts)
1173 timer, fm = gettimer(ui, opts)
1160 import mercurial.revlog
1174 import mercurial.revlog
1161 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1175 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1162 n = scmutil.revsingle(repo, rev).node()
1176 n = scmutil.revsingle(repo, rev).node()
1163 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1177 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1164 def d():
1178 def d():
1165 cl.rev(n)
1179 cl.rev(n)
1166 clearcaches(cl)
1180 clearcaches(cl)
1167 timer(d)
1181 timer(d)
1168 fm.end()
1182 fm.end()
1169
1183
1170 @command(b'perflog',
1184 @command(b'perflog',
1171 [(b'', b'rename', False, b'ask log to follow renames')
1185 [(b'', b'rename', False, b'ask log to follow renames')
1172 ] + formatteropts)
1186 ] + formatteropts)
1173 def perflog(ui, repo, rev=None, **opts):
1187 def perflog(ui, repo, rev=None, **opts):
1174 opts = _byteskwargs(opts)
1188 opts = _byteskwargs(opts)
1175 if rev is None:
1189 if rev is None:
1176 rev=[]
1190 rev=[]
1177 timer, fm = gettimer(ui, opts)
1191 timer, fm = gettimer(ui, opts)
1178 ui.pushbuffer()
1192 ui.pushbuffer()
1179 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1193 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1180 copies=opts.get(b'rename')))
1194 copies=opts.get(b'rename')))
1181 ui.popbuffer()
1195 ui.popbuffer()
1182 fm.end()
1196 fm.end()
1183
1197
1184 @command(b'perfmoonwalk', formatteropts)
1198 @command(b'perfmoonwalk', formatteropts)
1185 def perfmoonwalk(ui, repo, **opts):
1199 def perfmoonwalk(ui, repo, **opts):
1186 """benchmark walking the changelog backwards
1200 """benchmark walking the changelog backwards
1187
1201
1188 This also loads the changelog data for each revision in the changelog.
1202 This also loads the changelog data for each revision in the changelog.
1189 """
1203 """
1190 opts = _byteskwargs(opts)
1204 opts = _byteskwargs(opts)
1191 timer, fm = gettimer(ui, opts)
1205 timer, fm = gettimer(ui, opts)
1192 def moonwalk():
1206 def moonwalk():
1193 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1207 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1194 ctx = repo[i]
1208 ctx = repo[i]
1195 ctx.branch() # read changelog data (in addition to the index)
1209 ctx.branch() # read changelog data (in addition to the index)
1196 timer(moonwalk)
1210 timer(moonwalk)
1197 fm.end()
1211 fm.end()
1198
1212
1199 @command(b'perftemplating',
1213 @command(b'perftemplating',
1200 [(b'r', b'rev', [], b'revisions to run the template on'),
1214 [(b'r', b'rev', [], b'revisions to run the template on'),
1201 ] + formatteropts)
1215 ] + formatteropts)
1202 def perftemplating(ui, repo, testedtemplate=None, **opts):
1216 def perftemplating(ui, repo, testedtemplate=None, **opts):
1203 """test the rendering time of a given template"""
1217 """test the rendering time of a given template"""
1204 if makelogtemplater is None:
1218 if makelogtemplater is None:
1205 raise error.Abort((b"perftemplating not available with this Mercurial"),
1219 raise error.Abort((b"perftemplating not available with this Mercurial"),
1206 hint=b"use 4.3 or later")
1220 hint=b"use 4.3 or later")
1207
1221
1208 opts = _byteskwargs(opts)
1222 opts = _byteskwargs(opts)
1209
1223
1210 nullui = ui.copy()
1224 nullui = ui.copy()
1211 nullui.fout = open(os.devnull, r'wb')
1225 nullui.fout = open(os.devnull, r'wb')
1212 nullui.disablepager()
1226 nullui.disablepager()
1213 revs = opts.get(b'rev')
1227 revs = opts.get(b'rev')
1214 if not revs:
1228 if not revs:
1215 revs = [b'all()']
1229 revs = [b'all()']
1216 revs = list(scmutil.revrange(repo, revs))
1230 revs = list(scmutil.revrange(repo, revs))
1217
1231
1218 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1232 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1219 b' {author|person}: {desc|firstline}\n')
1233 b' {author|person}: {desc|firstline}\n')
1220 if testedtemplate is None:
1234 if testedtemplate is None:
1221 testedtemplate = defaulttemplate
1235 testedtemplate = defaulttemplate
1222 displayer = makelogtemplater(nullui, repo, testedtemplate)
1236 displayer = makelogtemplater(nullui, repo, testedtemplate)
1223 def format():
1237 def format():
1224 for r in revs:
1238 for r in revs:
1225 ctx = repo[r]
1239 ctx = repo[r]
1226 displayer.show(ctx)
1240 displayer.show(ctx)
1227 displayer.flush(ctx)
1241 displayer.flush(ctx)
1228
1242
1229 timer, fm = gettimer(ui, opts)
1243 timer, fm = gettimer(ui, opts)
1230 timer(format)
1244 timer(format)
1231 fm.end()
1245 fm.end()
1232
1246
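# Illustrative invocations of perftemplating (not part of the original file;
# the revset and template are arbitrary examples):
#
#   $ hg perftemplating -r '-1000:'
#   $ hg perftemplating -r tip '{rev}:{node|short} {desc|firstline}\n'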
1233 @command(b'perfhelper-pathcopies', formatteropts +
1247 @command(b'perfhelper-pathcopies', formatteropts +
1234 [
1248 [
1235 (b'r', b'revs', [], b'restrict search to these revisions'),
1249 (b'r', b'revs', [], b'restrict search to these revisions'),
1236 (b'', b'timing', False, b'provides extra data (costly)'),
1250 (b'', b'timing', False, b'provides extra data (costly)'),
1237 ])
1251 ])
1238 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1252 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1239 """find statistic about potential parameters for the `perftracecopies`
1253 """find statistic about potential parameters for the `perftracecopies`
1240
1254
1241 This command finds source-destination pairs relevant for copytracing testing.
1255 This command finds source-destination pairs relevant for copytracing testing.
1242 It reports values for some of the parameters that impact copy tracing time.
1256 It reports values for some of the parameters that impact copy tracing time.
1243
1257
1244 If `--timing` is set, rename detection is run and the associated timing
1258 If `--timing` is set, rename detection is run and the associated timing
1245 will be reported. The extra details come at the cost of a slower command
1259 will be reported. The extra details come at the cost of a slower command
1246 execution.
1260 execution.
1247
1261
1248 Since the rename detection is only run once, other factors might easily
1262 Since the rename detection is only run once, other factors might easily
1249 affect the precision of the timing. However, it should give a good
1263 affect the precision of the timing. However, it should give a good
1250 approximation of which revision pairs are very costly.
1264 approximation of which revision pairs are very costly.
1251 """
1265 """
1252 opts = _byteskwargs(opts)
1266 opts = _byteskwargs(opts)
1253 fm = ui.formatter(b'perf', opts)
1267 fm = ui.formatter(b'perf', opts)
1254 dotiming = opts[b'timing']
1268 dotiming = opts[b'timing']
1255
1269
1256 if dotiming:
1270 if dotiming:
1257 header = '%12s %12s %12s %12s %12s %12s\n'
1271 header = '%12s %12s %12s %12s %12s %12s\n'
1258 output = ("%(source)12s %(destination)12s "
1272 output = ("%(source)12s %(destination)12s "
1259 "%(nbrevs)12d %(nbmissingfiles)12d "
1273 "%(nbrevs)12d %(nbmissingfiles)12d "
1260 "%(nbrenamedfiles)12d %(time)18.5f\n")
1274 "%(nbrenamedfiles)12d %(time)18.5f\n")
1261 header_names = ("source", "destination", "nb-revs", "nb-files",
1275 header_names = ("source", "destination", "nb-revs", "nb-files",
1262 "nb-renames", "time")
1276 "nb-renames", "time")
1263 fm.plain(header % header_names)
1277 fm.plain(header % header_names)
1264 else:
1278 else:
1265 header = '%12s %12s %12s %12s\n'
1279 header = '%12s %12s %12s %12s\n'
1266 output = ("%(source)12s %(destination)12s "
1280 output = ("%(source)12s %(destination)12s "
1267 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1281 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1268 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1282 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1269
1283
1270 if not revs:
1284 if not revs:
1271 revs = ['all()']
1285 revs = ['all()']
1272 revs = scmutil.revrange(repo, revs)
1286 revs = scmutil.revrange(repo, revs)
1273
1287
1274 roi = repo.revs('merge() and %ld', revs)
1288 roi = repo.revs('merge() and %ld', revs)
1275 for r in roi:
1289 for r in roi:
1276 ctx = repo[r]
1290 ctx = repo[r]
1277 p1 = ctx.p1().rev()
1291 p1 = ctx.p1().rev()
1278 p2 = ctx.p2().rev()
1292 p2 = ctx.p2().rev()
1279 bases = repo.changelog._commonancestorsheads(p1, p2)
1293 bases = repo.changelog._commonancestorsheads(p1, p2)
1280 for p in (p1, p2):
1294 for p in (p1, p2):
1281 for b in bases:
1295 for b in bases:
1282 base = repo[b]
1296 base = repo[b]
1283 parent = repo[p]
1297 parent = repo[p]
1284 missing = copies._computeforwardmissing(base, parent)
1298 missing = copies._computeforwardmissing(base, parent)
1285 if not missing:
1299 if not missing:
1286 continue
1300 continue
1287 data = {
1301 data = {
1288 b'source': base.hex(),
1302 b'source': base.hex(),
1289 b'destination': parent.hex(),
1303 b'destination': parent.hex(),
1290 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1304 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1291 b'nbmissingfiles': len(missing),
1305 b'nbmissingfiles': len(missing),
1292 }
1306 }
1293 if dotiming:
1307 if dotiming:
1294 begin = util.timer()
1308 begin = util.timer()
1295 renames = copies.pathcopies(base, parent)
1309 renames = copies.pathcopies(base, parent)
1296 end = util.timer()
1310 end = util.timer()
1297 # not very stable timing since we did only one run
1311 # not very stable timing since we did only one run
1298 data['time'] = end - begin
1312 data['time'] = end - begin
1299 data['nbrenamedfiles'] = len(renames)
1313 data['nbrenamedfiles'] = len(renames)
1300 fm.startitem()
1314 fm.startitem()
1301 fm.data(**data)
1315 fm.data(**data)
1302 out = data.copy()
1316 out = data.copy()
1303 out['source'] = fm.hexfunc(base.node())
1317 out['source'] = fm.hexfunc(base.node())
1304 out['destination'] = fm.hexfunc(parent.node())
1318 out['destination'] = fm.hexfunc(parent.node())
1305 fm.plain(output % out)
1319 fm.plain(output % out)
1306
1320
1307 fm.end()
1321 fm.end()
1308
1322
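# Illustrative invocations of perfhelper-pathcopies (not part of the original
# file; the revset is an arbitrary example):
#
#   $ hg perfhelper-pathcopies --revs '-5000:'
#   $ hg perfhelper-pathcopies --timing    # slower, also reports rename time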
1309 @command(b'perfcca', formatteropts)
1323 @command(b'perfcca', formatteropts)
1310 def perfcca(ui, repo, **opts):
1324 def perfcca(ui, repo, **opts):
1311 opts = _byteskwargs(opts)
1325 opts = _byteskwargs(opts)
1312 timer, fm = gettimer(ui, opts)
1326 timer, fm = gettimer(ui, opts)
1313 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1327 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1314 fm.end()
1328 fm.end()
1315
1329
1316 @command(b'perffncacheload', formatteropts)
1330 @command(b'perffncacheload', formatteropts)
1317 def perffncacheload(ui, repo, **opts):
1331 def perffncacheload(ui, repo, **opts):
1318 opts = _byteskwargs(opts)
1332 opts = _byteskwargs(opts)
1319 timer, fm = gettimer(ui, opts)
1333 timer, fm = gettimer(ui, opts)
1320 s = repo.store
1334 s = repo.store
1321 def d():
1335 def d():
1322 s.fncache._load()
1336 s.fncache._load()
1323 timer(d)
1337 timer(d)
1324 fm.end()
1338 fm.end()
1325
1339
1326 @command(b'perffncachewrite', formatteropts)
1340 @command(b'perffncachewrite', formatteropts)
1327 def perffncachewrite(ui, repo, **opts):
1341 def perffncachewrite(ui, repo, **opts):
1328 opts = _byteskwargs(opts)
1342 opts = _byteskwargs(opts)
1329 timer, fm = gettimer(ui, opts)
1343 timer, fm = gettimer(ui, opts)
1330 s = repo.store
1344 s = repo.store
1331 lock = repo.lock()
1345 lock = repo.lock()
1332 s.fncache._load()
1346 s.fncache._load()
1333 tr = repo.transaction(b'perffncachewrite')
1347 tr = repo.transaction(b'perffncachewrite')
1334 tr.addbackup(b'fncache')
1348 tr.addbackup(b'fncache')
1335 def d():
1349 def d():
1336 s.fncache._dirty = True
1350 s.fncache._dirty = True
1337 s.fncache.write(tr)
1351 s.fncache.write(tr)
1338 timer(d)
1352 timer(d)
1339 tr.close()
1353 tr.close()
1340 lock.release()
1354 lock.release()
1341 fm.end()
1355 fm.end()
1342
1356
1343 @command(b'perffncacheencode', formatteropts)
1357 @command(b'perffncacheencode', formatteropts)
1344 def perffncacheencode(ui, repo, **opts):
1358 def perffncacheencode(ui, repo, **opts):
1345 opts = _byteskwargs(opts)
1359 opts = _byteskwargs(opts)
1346 timer, fm = gettimer(ui, opts)
1360 timer, fm = gettimer(ui, opts)
1347 s = repo.store
1361 s = repo.store
1348 s.fncache._load()
1362 s.fncache._load()
1349 def d():
1363 def d():
1350 for p in s.fncache.entries:
1364 for p in s.fncache.entries:
1351 s.encode(p)
1365 s.encode(p)
1352 timer(d)
1366 timer(d)
1353 fm.end()
1367 fm.end()
1354
1368
1355 def _bdiffworker(q, blocks, xdiff, ready, done):
1369 def _bdiffworker(q, blocks, xdiff, ready, done):
1356 while not done.is_set():
1370 while not done.is_set():
1357 pair = q.get()
1371 pair = q.get()
1358 while pair is not None:
1372 while pair is not None:
1359 if xdiff:
1373 if xdiff:
1360 mdiff.bdiff.xdiffblocks(*pair)
1374 mdiff.bdiff.xdiffblocks(*pair)
1361 elif blocks:
1375 elif blocks:
1362 mdiff.bdiff.blocks(*pair)
1376 mdiff.bdiff.blocks(*pair)
1363 else:
1377 else:
1364 mdiff.textdiff(*pair)
1378 mdiff.textdiff(*pair)
1365 q.task_done()
1379 q.task_done()
1366 pair = q.get()
1380 pair = q.get()
1367 q.task_done() # for the None one
1381 q.task_done() # for the None one
1368 with ready:
1382 with ready:
1369 ready.wait()
1383 ready.wait()
1370
1384
1371 def _manifestrevision(repo, mnode):
1385 def _manifestrevision(repo, mnode):
1372 ml = repo.manifestlog
1386 ml = repo.manifestlog
1373
1387
1374 if util.safehasattr(ml, b'getstorage'):
1388 if util.safehasattr(ml, b'getstorage'):
1375 store = ml.getstorage(b'')
1389 store = ml.getstorage(b'')
1376 else:
1390 else:
1377 store = ml._revlog
1391 store = ml._revlog
1378
1392
1379 return store.revision(mnode)
1393 return store.revision(mnode)
1380
1394
1381 @command(b'perfbdiff', revlogopts + formatteropts + [
1395 @command(b'perfbdiff', revlogopts + formatteropts + [
1382 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1396 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1383 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1397 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1384 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1398 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1385 (b'', b'blocks', False, b'test computing diffs into blocks'),
1399 (b'', b'blocks', False, b'test computing diffs into blocks'),
1386 (b'', b'xdiff', False, b'use xdiff algorithm'),
1400 (b'', b'xdiff', False, b'use xdiff algorithm'),
1387 ],
1401 ],
1388
1402
1389 b'-c|-m|FILE REV')
1403 b'-c|-m|FILE REV')
1390 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1404 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1391 """benchmark a bdiff between revisions
1405 """benchmark a bdiff between revisions
1392
1406
1393 By default, benchmark a bdiff between its delta parent and itself.
1407 By default, benchmark a bdiff between its delta parent and itself.
1394
1408
1395 With ``--count``, benchmark bdiffs between delta parents and self for N
1409 With ``--count``, benchmark bdiffs between delta parents and self for N
1396 revisions starting at the specified revision.
1410 revisions starting at the specified revision.
1397
1411
1398 With ``--alldata``, assume the requested revision is a changeset and
1412 With ``--alldata``, assume the requested revision is a changeset and
1399 measure bdiffs for all changes related to that changeset (manifest
1413 measure bdiffs for all changes related to that changeset (manifest
1400 and filelogs).
1414 and filelogs).
1401 """
1415 """
1402 opts = _byteskwargs(opts)
1416 opts = _byteskwargs(opts)
1403
1417
1404 if opts[b'xdiff'] and not opts[b'blocks']:
1418 if opts[b'xdiff'] and not opts[b'blocks']:
1405 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1419 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1406
1420
1407 if opts[b'alldata']:
1421 if opts[b'alldata']:
1408 opts[b'changelog'] = True
1422 opts[b'changelog'] = True
1409
1423
1410 if opts.get(b'changelog') or opts.get(b'manifest'):
1424 if opts.get(b'changelog') or opts.get(b'manifest'):
1411 file_, rev = None, file_
1425 file_, rev = None, file_
1412 elif rev is None:
1426 elif rev is None:
1413 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1427 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1414
1428
1415 blocks = opts[b'blocks']
1429 blocks = opts[b'blocks']
1416 xdiff = opts[b'xdiff']
1430 xdiff = opts[b'xdiff']
1417 textpairs = []
1431 textpairs = []
1418
1432
1419 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1433 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1420
1434
1421 startrev = r.rev(r.lookup(rev))
1435 startrev = r.rev(r.lookup(rev))
1422 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1436 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1423 if opts[b'alldata']:
1437 if opts[b'alldata']:
1424 # Load revisions associated with changeset.
1438 # Load revisions associated with changeset.
1425 ctx = repo[rev]
1439 ctx = repo[rev]
1426 mtext = _manifestrevision(repo, ctx.manifestnode())
1440 mtext = _manifestrevision(repo, ctx.manifestnode())
1427 for pctx in ctx.parents():
1441 for pctx in ctx.parents():
1428 pman = _manifestrevision(repo, pctx.manifestnode())
1442 pman = _manifestrevision(repo, pctx.manifestnode())
1429 textpairs.append((pman, mtext))
1443 textpairs.append((pman, mtext))
1430
1444
1431 # Load filelog revisions by iterating manifest delta.
1445 # Load filelog revisions by iterating manifest delta.
1432 man = ctx.manifest()
1446 man = ctx.manifest()
1433 pman = ctx.p1().manifest()
1447 pman = ctx.p1().manifest()
1434 for filename, change in pman.diff(man).items():
1448 for filename, change in pman.diff(man).items():
1435 fctx = repo.file(filename)
1449 fctx = repo.file(filename)
1436 f1 = fctx.revision(change[0][0] or -1)
1450 f1 = fctx.revision(change[0][0] or -1)
1437 f2 = fctx.revision(change[1][0] or -1)
1451 f2 = fctx.revision(change[1][0] or -1)
1438 textpairs.append((f1, f2))
1452 textpairs.append((f1, f2))
1439 else:
1453 else:
1440 dp = r.deltaparent(rev)
1454 dp = r.deltaparent(rev)
1441 textpairs.append((r.revision(dp), r.revision(rev)))
1455 textpairs.append((r.revision(dp), r.revision(rev)))
1442
1456
1443 withthreads = threads > 0
1457 withthreads = threads > 0
1444 if not withthreads:
1458 if not withthreads:
1445 def d():
1459 def d():
1446 for pair in textpairs:
1460 for pair in textpairs:
1447 if xdiff:
1461 if xdiff:
1448 mdiff.bdiff.xdiffblocks(*pair)
1462 mdiff.bdiff.xdiffblocks(*pair)
1449 elif blocks:
1463 elif blocks:
1450 mdiff.bdiff.blocks(*pair)
1464 mdiff.bdiff.blocks(*pair)
1451 else:
1465 else:
1452 mdiff.textdiff(*pair)
1466 mdiff.textdiff(*pair)
1453 else:
1467 else:
1454 q = queue()
1468 q = queue()
1455 for i in _xrange(threads):
1469 for i in _xrange(threads):
1456 q.put(None)
1470 q.put(None)
1457 ready = threading.Condition()
1471 ready = threading.Condition()
1458 done = threading.Event()
1472 done = threading.Event()
1459 for i in _xrange(threads):
1473 for i in _xrange(threads):
1460 threading.Thread(target=_bdiffworker,
1474 threading.Thread(target=_bdiffworker,
1461 args=(q, blocks, xdiff, ready, done)).start()
1475 args=(q, blocks, xdiff, ready, done)).start()
1462 q.join()
1476 q.join()
1463 def d():
1477 def d():
1464 for pair in textpairs:
1478 for pair in textpairs:
1465 q.put(pair)
1479 q.put(pair)
1466 for i in _xrange(threads):
1480 for i in _xrange(threads):
1467 q.put(None)
1481 q.put(None)
1468 with ready:
1482 with ready:
1469 ready.notify_all()
1483 ready.notify_all()
1470 q.join()
1484 q.join()
1471 timer, fm = gettimer(ui, opts)
1485 timer, fm = gettimer(ui, opts)
1472 timer(d)
1486 timer(d)
1473 fm.end()
1487 fm.end()
1474
1488
1475 if withthreads:
1489 if withthreads:
1476 done.set()
1490 done.set()
1477 for i in _xrange(threads):
1491 for i in _xrange(threads):
1478 q.put(None)
1492 q.put(None)
1479 with ready:
1493 with ready:
1480 ready.notify_all()
1494 ready.notify_all()
1481
1495
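# Illustrative invocations of perfbdiff (not part of the original file;
# revisions and counts are arbitrary examples):
#
#   $ hg perfbdiff -m tip                         # manifest delta at tip
#   $ hg perfbdiff -c 1000 --alldata --count 10   # all diffs around a changeset
#   $ hg perfbdiff -m tip --blocks --xdiff        # xdiff block computation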
1482 @command(b'perfunidiff', revlogopts + formatteropts + [
1496 @command(b'perfunidiff', revlogopts + formatteropts + [
1483 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1497 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1484 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1498 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1485 ], b'-c|-m|FILE REV')
1499 ], b'-c|-m|FILE REV')
1486 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1500 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1487 """benchmark a unified diff between revisions
1501 """benchmark a unified diff between revisions
1488
1502
1489 This doesn't include any copy tracing - it's just a unified diff
1503 This doesn't include any copy tracing - it's just a unified diff
1490 of the texts.
1504 of the texts.
1491
1505
1492 By default, benchmark a diff between its delta parent and itself.
1506 By default, benchmark a diff between its delta parent and itself.
1493
1507
1494 With ``--count``, benchmark diffs between delta parents and self for N
1508 With ``--count``, benchmark diffs between delta parents and self for N
1495 revisions starting at the specified revision.
1509 revisions starting at the specified revision.
1496
1510
1497 With ``--alldata``, assume the requested revision is a changeset and
1511 With ``--alldata``, assume the requested revision is a changeset and
1498 measure diffs for all changes related to that changeset (manifest
1512 measure diffs for all changes related to that changeset (manifest
1499 and filelogs).
1513 and filelogs).
1500 """
1514 """
1501 opts = _byteskwargs(opts)
1515 opts = _byteskwargs(opts)
1502 if opts[b'alldata']:
1516 if opts[b'alldata']:
1503 opts[b'changelog'] = True
1517 opts[b'changelog'] = True
1504
1518
1505 if opts.get(b'changelog') or opts.get(b'manifest'):
1519 if opts.get(b'changelog') or opts.get(b'manifest'):
1506 file_, rev = None, file_
1520 file_, rev = None, file_
1507 elif rev is None:
1521 elif rev is None:
1508 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1522 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1509
1523
1510 textpairs = []
1524 textpairs = []
1511
1525
1512 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1526 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1513
1527
1514 startrev = r.rev(r.lookup(rev))
1528 startrev = r.rev(r.lookup(rev))
1515 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1529 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1516 if opts[b'alldata']:
1530 if opts[b'alldata']:
1517 # Load revisions associated with changeset.
1531 # Load revisions associated with changeset.
1518 ctx = repo[rev]
1532 ctx = repo[rev]
1519 mtext = _manifestrevision(repo, ctx.manifestnode())
1533 mtext = _manifestrevision(repo, ctx.manifestnode())
1520 for pctx in ctx.parents():
1534 for pctx in ctx.parents():
1521 pman = _manifestrevision(repo, pctx.manifestnode())
1535 pman = _manifestrevision(repo, pctx.manifestnode())
1522 textpairs.append((pman, mtext))
1536 textpairs.append((pman, mtext))
1523
1537
1524 # Load filelog revisions by iterating manifest delta.
1538 # Load filelog revisions by iterating manifest delta.
1525 man = ctx.manifest()
1539 man = ctx.manifest()
1526 pman = ctx.p1().manifest()
1540 pman = ctx.p1().manifest()
1527 for filename, change in pman.diff(man).items():
1541 for filename, change in pman.diff(man).items():
1528 fctx = repo.file(filename)
1542 fctx = repo.file(filename)
1529 f1 = fctx.revision(change[0][0] or -1)
1543 f1 = fctx.revision(change[0][0] or -1)
1530 f2 = fctx.revision(change[1][0] or -1)
1544 f2 = fctx.revision(change[1][0] or -1)
1531 textpairs.append((f1, f2))
1545 textpairs.append((f1, f2))
1532 else:
1546 else:
1533 dp = r.deltaparent(rev)
1547 dp = r.deltaparent(rev)
1534 textpairs.append((r.revision(dp), r.revision(rev)))
1548 textpairs.append((r.revision(dp), r.revision(rev)))
1535
1549
1536 def d():
1550 def d():
1537 for left, right in textpairs:
1551 for left, right in textpairs:
1538 # The date strings don't matter, so we pass empty strings.
1552 # The date strings don't matter, so we pass empty strings.
1539 headerlines, hunks = mdiff.unidiff(
1553 headerlines, hunks = mdiff.unidiff(
1540 left, b'', right, b'', b'left', b'right', binary=False)
1554 left, b'', right, b'', b'left', b'right', binary=False)
1541 # consume iterators in roughly the way patch.py does
1555 # consume iterators in roughly the way patch.py does
1542 b'\n'.join(headerlines)
1556 b'\n'.join(headerlines)
1543 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1557 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1544 timer, fm = gettimer(ui, opts)
1558 timer, fm = gettimer(ui, opts)
1545 timer(d)
1559 timer(d)
1546 fm.end()
1560 fm.end()
1547
1561
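# Illustrative invocations of perfunidiff (not part of the original file;
# revisions and counts are arbitrary examples):
#
#   $ hg perfunidiff -m tip
#   $ hg perfunidiff -c 1000 --alldata --count 10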
1548 @command(b'perfdiffwd', formatteropts)
1562 @command(b'perfdiffwd', formatteropts)
1549 def perfdiffwd(ui, repo, **opts):
1563 def perfdiffwd(ui, repo, **opts):
1550 """Profile diff of working directory changes"""
1564 """Profile diff of working directory changes"""
1551 opts = _byteskwargs(opts)
1565 opts = _byteskwargs(opts)
1552 timer, fm = gettimer(ui, opts)
1566 timer, fm = gettimer(ui, opts)
1553 options = {
1567 options = {
1554 'w': 'ignore_all_space',
1568 'w': 'ignore_all_space',
1555 'b': 'ignore_space_change',
1569 'b': 'ignore_space_change',
1556 'B': 'ignore_blank_lines',
1570 'B': 'ignore_blank_lines',
1557 }
1571 }
1558
1572
1559 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1573 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1560 opts = dict((options[c], b'1') for c in diffopt)
1574 opts = dict((options[c], b'1') for c in diffopt)
1561 def d():
1575 def d():
1562 ui.pushbuffer()
1576 ui.pushbuffer()
1563 commands.diff(ui, repo, **opts)
1577 commands.diff(ui, repo, **opts)
1564 ui.popbuffer()
1578 ui.popbuffer()
1565 diffopt = diffopt.encode('ascii')
1579 diffopt = diffopt.encode('ascii')
1566 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1580 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1567 timer(d, title=title)
1581 timer(d, title=title)
1568 fm.end()
1582 fm.end()
1569
1583
1570 @command(b'perfrevlogindex', revlogopts + formatteropts,
1584 @command(b'perfrevlogindex', revlogopts + formatteropts,
1571 b'-c|-m|FILE')
1585 b'-c|-m|FILE')
1572 def perfrevlogindex(ui, repo, file_=None, **opts):
1586 def perfrevlogindex(ui, repo, file_=None, **opts):
1573 """Benchmark operations against a revlog index.
1587 """Benchmark operations against a revlog index.
1574
1588
1575 This tests constructing a revlog instance, reading index data,
1589 This tests constructing a revlog instance, reading index data,
1576 parsing index data, and performing various operations related to
1590 parsing index data, and performing various operations related to
1577 index data.
1591 index data.
1578 """
1592 """
1579
1593
1580 opts = _byteskwargs(opts)
1594 opts = _byteskwargs(opts)
1581
1595
1582 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1596 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1583
1597
1584 opener = getattr(rl, 'opener') # trick linter
1598 opener = getattr(rl, 'opener') # trick linter
1585 indexfile = rl.indexfile
1599 indexfile = rl.indexfile
1586 data = opener.read(indexfile)
1600 data = opener.read(indexfile)
1587
1601
1588 header = struct.unpack(b'>I', data[0:4])[0]
1602 header = struct.unpack(b'>I', data[0:4])[0]
1589 version = header & 0xFFFF
1603 version = header & 0xFFFF
1590 if version == 1:
1604 if version == 1:
1591 revlogio = revlog.revlogio()
1605 revlogio = revlog.revlogio()
1592 inline = header & (1 << 16)
1606 inline = header & (1 << 16)
1593 else:
1607 else:
1594 raise error.Abort((b'unsupported revlog version: %d') % version)
1608 raise error.Abort((b'unsupported revlog version: %d') % version)
1595
1609
1596 rllen = len(rl)
1610 rllen = len(rl)
1597
1611
1598 node0 = rl.node(0)
1612 node0 = rl.node(0)
1599 node25 = rl.node(rllen // 4)
1613 node25 = rl.node(rllen // 4)
1600 node50 = rl.node(rllen // 2)
1614 node50 = rl.node(rllen // 2)
1601 node75 = rl.node(rllen // 4 * 3)
1615 node75 = rl.node(rllen // 4 * 3)
1602 node100 = rl.node(rllen - 1)
1616 node100 = rl.node(rllen - 1)
1603
1617
1604 allrevs = range(rllen)
1618 allrevs = range(rllen)
1605 allrevsrev = list(reversed(allrevs))
1619 allrevsrev = list(reversed(allrevs))
1606 allnodes = [rl.node(rev) for rev in range(rllen)]
1620 allnodes = [rl.node(rev) for rev in range(rllen)]
1607 allnodesrev = list(reversed(allnodes))
1621 allnodesrev = list(reversed(allnodes))
1608
1622
1609 def constructor():
1623 def constructor():
1610 revlog.revlog(opener, indexfile)
1624 revlog.revlog(opener, indexfile)
1611
1625
1612 def read():
1626 def read():
1613 with opener(indexfile) as fh:
1627 with opener(indexfile) as fh:
1614 fh.read()
1628 fh.read()
1615
1629
1616 def parseindex():
1630 def parseindex():
1617 revlogio.parseindex(data, inline)
1631 revlogio.parseindex(data, inline)
1618
1632
1619 def getentry(revornode):
1633 def getentry(revornode):
1620 index = revlogio.parseindex(data, inline)[0]
1634 index = revlogio.parseindex(data, inline)[0]
1621 index[revornode]
1635 index[revornode]
1622
1636
1623 def getentries(revs, count=1):
1637 def getentries(revs, count=1):
1624 index = revlogio.parseindex(data, inline)[0]
1638 index = revlogio.parseindex(data, inline)[0]
1625
1639
1626 for i in range(count):
1640 for i in range(count):
1627 for rev in revs:
1641 for rev in revs:
1628 index[rev]
1642 index[rev]
1629
1643
1630 def resolvenode(node):
1644 def resolvenode(node):
1631 nodemap = revlogio.parseindex(data, inline)[1]
1645 nodemap = revlogio.parseindex(data, inline)[1]
1632 # This only works for the C code.
1646 # This only works for the C code.
1633 if nodemap is None:
1647 if nodemap is None:
1634 return
1648 return
1635
1649
1636 try:
1650 try:
1637 nodemap[node]
1651 nodemap[node]
1638 except error.RevlogError:
1652 except error.RevlogError:
1639 pass
1653 pass
1640
1654
1641 def resolvenodes(nodes, count=1):
1655 def resolvenodes(nodes, count=1):
1642 nodemap = revlogio.parseindex(data, inline)[1]
1656 nodemap = revlogio.parseindex(data, inline)[1]
1643 if nodemap is None:
1657 if nodemap is None:
1644 return
1658 return
1645
1659
1646 for i in range(count):
1660 for i in range(count):
1647 for node in nodes:
1661 for node in nodes:
1648 try:
1662 try:
1649 nodemap[node]
1663 nodemap[node]
1650 except error.RevlogError:
1664 except error.RevlogError:
1651 pass
1665 pass
1652
1666
1653 benches = [
1667 benches = [
1654 (constructor, b'revlog constructor'),
1668 (constructor, b'revlog constructor'),
1655 (read, b'read'),
1669 (read, b'read'),
1656 (parseindex, b'create index object'),
1670 (parseindex, b'create index object'),
1657 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1671 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1658 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1672 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1659 (lambda: resolvenode(node0), b'look up node at rev 0'),
1673 (lambda: resolvenode(node0), b'look up node at rev 0'),
1660 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1674 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1661 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1675 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1662 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1676 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1663 (lambda: resolvenode(node100), b'look up node at tip'),
1677 (lambda: resolvenode(node100), b'look up node at tip'),
1664 # 2x variation is to measure caching impact.
1678 # 2x variation is to measure caching impact.
1665 (lambda: resolvenodes(allnodes),
1679 (lambda: resolvenodes(allnodes),
1666 b'look up all nodes (forward)'),
1680 b'look up all nodes (forward)'),
1667 (lambda: resolvenodes(allnodes, 2),
1681 (lambda: resolvenodes(allnodes, 2),
1668 b'look up all nodes 2x (forward)'),
1682 b'look up all nodes 2x (forward)'),
1669 (lambda: resolvenodes(allnodesrev),
1683 (lambda: resolvenodes(allnodesrev),
1670 b'look up all nodes (reverse)'),
1684 b'look up all nodes (reverse)'),
1671 (lambda: resolvenodes(allnodesrev, 2),
1685 (lambda: resolvenodes(allnodesrev, 2),
1672 b'look up all nodes 2x (reverse)'),
1686 b'look up all nodes 2x (reverse)'),
1673 (lambda: getentries(allrevs),
1687 (lambda: getentries(allrevs),
1674 b'retrieve all index entries (forward)'),
1688 b'retrieve all index entries (forward)'),
1675 (lambda: getentries(allrevs, 2),
1689 (lambda: getentries(allrevs, 2),
1676 b'retrieve all index entries 2x (forward)'),
1690 b'retrieve all index entries 2x (forward)'),
1677 (lambda: getentries(allrevsrev),
1691 (lambda: getentries(allrevsrev),
1678 b'retrieve all index entries (reverse)'),
1692 b'retrieve all index entries (reverse)'),
1679 (lambda: getentries(allrevsrev, 2),
1693 (lambda: getentries(allrevsrev, 2),
1680 b'retrieve all index entries 2x (reverse)'),
1694 b'retrieve all index entries 2x (reverse)'),
1681 ]
1695 ]
1682
1696
1683 for fn, title in benches:
1697 for fn, title in benches:
1684 timer, fm = gettimer(ui, opts)
1698 timer, fm = gettimer(ui, opts)
1685 timer(fn, title=title)
1699 timer(fn, title=title)
1686 fm.end()
1700 fm.end()
1687
1701
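# Illustrative invocations of perfrevlogindex (not part of the original file):
#
#   $ hg perfrevlogindex -c    # benchmark the changelog index
#   $ hg perfrevlogindex -m    # benchmark the manifest index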
1688 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1702 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1689 [(b'd', b'dist', 100, b'distance between the revisions'),
1703 [(b'd', b'dist', 100, b'distance between the revisions'),
1690 (b's', b'startrev', 0, b'revision to start reading at'),
1704 (b's', b'startrev', 0, b'revision to start reading at'),
1691 (b'', b'reverse', False, b'read in reverse')],
1705 (b'', b'reverse', False, b'read in reverse')],
1692 b'-c|-m|FILE')
1706 b'-c|-m|FILE')
1693 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1707 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1694 **opts):
1708 **opts):
1695 """Benchmark reading a series of revisions from a revlog.
1709 """Benchmark reading a series of revisions from a revlog.
1696
1710
1697 By default, we read every ``-d/--dist`` revision from 0 to tip of
1711 By default, we read every ``-d/--dist`` revision from 0 to tip of
1698 the specified revlog.
1712 the specified revlog.
1699
1713
1700 The start revision can be defined via ``-s/--startrev``.
1714 The start revision can be defined via ``-s/--startrev``.
1701 """
1715 """
1702 opts = _byteskwargs(opts)
1716 opts = _byteskwargs(opts)
1703
1717
1704 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1718 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1705 rllen = getlen(ui)(rl)
1719 rllen = getlen(ui)(rl)
1706
1720
1707 if startrev < 0:
1721 if startrev < 0:
1708 startrev = rllen + startrev
1722 startrev = rllen + startrev
1709
1723
1710 def d():
1724 def d():
1711 rl.clearcaches()
1725 rl.clearcaches()
1712
1726
1713 beginrev = startrev
1727 beginrev = startrev
1714 endrev = rllen
1728 endrev = rllen
1715 dist = opts[b'dist']
1729 dist = opts[b'dist']
1716
1730
1717 if reverse:
1731 if reverse:
1718 beginrev, endrev = endrev - 1, beginrev - 1
1732 beginrev, endrev = endrev - 1, beginrev - 1
1719 dist = -1 * dist
1733 dist = -1 * dist
1720
1734
1721 for x in _xrange(beginrev, endrev, dist):
1735 for x in _xrange(beginrev, endrev, dist):
1722 # Old revisions don't support passing int.
1736 # Old revisions don't support passing int.
1723 n = rl.node(x)
1737 n = rl.node(x)
1724 rl.revision(n)
1738 rl.revision(n)
1725
1739
1726 timer, fm = gettimer(ui, opts)
1740 timer, fm = gettimer(ui, opts)
1727 timer(d)
1741 timer(d)
1728 fm.end()
1742 fm.end()
1729
1743
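# Illustrative invocations of perfrevlogrevisions (not part of the original
# file; the numbers are arbitrary examples):
#
#   $ hg perfrevlogrevisions -c --dist 100
#   $ hg perfrevlogrevisions -m --startrev -1000 --reverse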
1730 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1744 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1731 [(b's', b'startrev', 1000, b'revision to start writing at'),
1745 [(b's', b'startrev', 1000, b'revision to start writing at'),
1732 (b'', b'stoprev', -1, b'last revision to write'),
1746 (b'', b'stoprev', -1, b'last revision to write'),
1733 (b'', b'count', 3, b'number of runs to perform'),
1747 (b'', b'count', 3, b'number of runs to perform'),
1734 (b'', b'details', False, b'print timing for every revisions tested'),
1748 (b'', b'details', False, b'print timing for every revisions tested'),
1735 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1749 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1736 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1750 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1737 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1751 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1738 ],
1752 ],
1739 b'-c|-m|FILE')
1753 b'-c|-m|FILE')
1740 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1754 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1741 """Benchmark writing a series of revisions to a revlog.
1755 """Benchmark writing a series of revisions to a revlog.
1742
1756
1743 Possible source values are:
1757 Possible source values are:
1744 * `full`: add from a full text (default).
1758 * `full`: add from a full text (default).
1745 * `parent-1`: add from a delta to the first parent
1759 * `parent-1`: add from a delta to the first parent
1746 * `parent-2`: add from a delta to the second parent if it exists
1760 * `parent-2`: add from a delta to the second parent if it exists
1747 (use a delta from the first parent otherwise)
1761 (use a delta from the first parent otherwise)
1748 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1762 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1749 * `storage`: add from the existing precomputed deltas
1763 * `storage`: add from the existing precomputed deltas
1750 """
1764 """
1751 opts = _byteskwargs(opts)
1765 opts = _byteskwargs(opts)
1752
1766
1753 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1767 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1754 rllen = getlen(ui)(rl)
1768 rllen = getlen(ui)(rl)
1755 if startrev < 0:
1769 if startrev < 0:
1756 startrev = rllen + startrev
1770 startrev = rllen + startrev
1757 if stoprev < 0:
1771 if stoprev < 0:
1758 stoprev = rllen + stoprev
1772 stoprev = rllen + stoprev
1759
1773
1760 lazydeltabase = opts['lazydeltabase']
1774 lazydeltabase = opts['lazydeltabase']
1761 source = opts['source']
1775 source = opts['source']
1762 clearcaches = opts['clear_caches']
1776 clearcaches = opts['clear_caches']
1763 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1777 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1764 b'storage')
1778 b'storage')
1765 if source not in validsource:
1779 if source not in validsource:
1766 raise error.Abort('invalid source type: %s' % source)
1780 raise error.Abort('invalid source type: %s' % source)
1767
1781
1768 ### actually gather results
1782 ### actually gather results
1769 count = opts['count']
1783 count = opts['count']
1770 if count <= 0:
1784 if count <= 0:
1771 raise error.Abort('invalid run count: %d' % count)
1785 raise error.Abort('invalid run count: %d' % count)
1772 allresults = []
1786 allresults = []
1773 for c in range(count):
1787 for c in range(count):
1774 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1788 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1775 lazydeltabase=lazydeltabase,
1789 lazydeltabase=lazydeltabase,
1776 clearcaches=clearcaches)
1790 clearcaches=clearcaches)
1777 allresults.append(timing)
1791 allresults.append(timing)
1778
1792
1779 ### consolidate the results in a single list
1793 ### consolidate the results in a single list
1780 results = []
1794 results = []
1781 for idx, (rev, t) in enumerate(allresults[0]):
1795 for idx, (rev, t) in enumerate(allresults[0]):
1782 ts = [t]
1796 ts = [t]
1783 for other in allresults[1:]:
1797 for other in allresults[1:]:
1784 orev, ot = other[idx]
1798 orev, ot = other[idx]
1785 assert orev == rev
1799 assert orev == rev
1786 ts.append(ot)
1800 ts.append(ot)
1787 results.append((rev, ts))
1801 results.append((rev, ts))
1788 resultcount = len(results)
1802 resultcount = len(results)
1789
1803
1790 ### Compute and display relevant statistics
1804 ### Compute and display relevant statistics
1791
1805
1792 # get a formatter
1806 # get a formatter
1793 fm = ui.formatter(b'perf', opts)
1807 fm = ui.formatter(b'perf', opts)
1794 displayall = ui.configbool(b"perf", b"all-timing", False)
1808 displayall = ui.configbool(b"perf", b"all-timing", False)
1795
1809
1796 # print individual details if requested
1810 # print individual details if requested
1797 if opts['details']:
1811 if opts['details']:
1798 for idx, item in enumerate(results, 1):
1812 for idx, item in enumerate(results, 1):
1799 rev, data = item
1813 rev, data = item
1800 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1814 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1801 formatone(fm, data, title=title, displayall=displayall)
1815 formatone(fm, data, title=title, displayall=displayall)
1802
1816
1803 # sorts results by median time
1817 # sorts results by median time
1804 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1818 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1805 # list of (name, index) to display
1819 # list of (name, index) to display
1806 relevants = [
1820 relevants = [
1807 ("min", 0),
1821 ("min", 0),
1808 ("10%", resultcount * 10 // 100),
1822 ("10%", resultcount * 10 // 100),
1809 ("25%", resultcount * 25 // 100),
1823 ("25%", resultcount * 25 // 100),
1810 ("50%", resultcount * 70 // 100),
1824 ("50%", resultcount * 70 // 100),
1811 ("75%", resultcount * 75 // 100),
1825 ("75%", resultcount * 75 // 100),
1812 ("90%", resultcount * 90 // 100),
1826 ("90%", resultcount * 90 // 100),
1813 ("95%", resultcount * 95 // 100),
1827 ("95%", resultcount * 95 // 100),
1814 ("99%", resultcount * 99 // 100),
1828 ("99%", resultcount * 99 // 100),
1815 ("99.9%", resultcount * 999 // 1000),
1829 ("99.9%", resultcount * 999 // 1000),
1816 ("99.99%", resultcount * 9999 // 10000),
1830 ("99.99%", resultcount * 9999 // 10000),
1817 ("99.999%", resultcount * 99999 // 100000),
1831 ("99.999%", resultcount * 99999 // 100000),
1818 ("max", -1),
1832 ("max", -1),
1819 ]
1833 ]
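# each entry maps a percentile label to an index in the median-sorted results;
# e.g. with 200 measured revisions the "90%" row reads results[180], i.e. the
# revision whose median write time sits at the 90th percentile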
1820 if not ui.quiet:
1834 if not ui.quiet:
1821 for name, idx in relevants:
1835 for name, idx in relevants:
1822 data = results[idx]
1836 data = results[idx]
1823 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1837 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1824 formatone(fm, data[1], title=title, displayall=displayall)
1838 formatone(fm, data[1], title=title, displayall=displayall)
1825
1839
1826 # XXX summing that many floats will not be very precise; we ignore this fact
1840 # XXX summing that many floats will not be very precise; we ignore this fact
1827 # for now
1841 # for now
1828 totaltime = []
1842 totaltime = []
1829 for item in allresults:
1843 for item in allresults:
1830 totaltime.append((sum(x[1][0] for x in item),
1844 totaltime.append((sum(x[1][0] for x in item),
1831 sum(x[1][1] for x in item),
1845 sum(x[1][1] for x in item),
1832 sum(x[1][2] for x in item),)
1846 sum(x[1][2] for x in item),)
1833 )
1847 )
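# each per-revision timing is a 3-component tuple (as produced by timeone());
# summing the components across all revisions gives the total cost of one run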
1834 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1848 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1835 displayall=displayall)
1849 displayall=displayall)
1836 fm.end()
1850 fm.end()
1837
1851
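# _faketr is a minimal stand-in for a transaction: the addrawrevision() write
# path only calls tr.add(), so a no-op implementation is enough when
# benchmarking writes outside of a real transaction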
1838 class _faketr(object):
1852 class _faketr(object):
1839 def add(s, x, y, z=None):
1853 def add(s, x, y, z=None):
1840 return None
1854 return None
1841
1855
1842 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1856 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1843 lazydeltabase=True, clearcaches=True):
1857 lazydeltabase=True, clearcaches=True):
1844 timings = []
1858 timings = []
1845 tr = _faketr()
1859 tr = _faketr()
1846 with _temprevlog(ui, orig, startrev) as dest:
1860 with _temprevlog(ui, orig, startrev) as dest:
1847 dest._lazydeltabase = lazydeltabase
1861 dest._lazydeltabase = lazydeltabase
1848 revs = list(orig.revs(startrev, stoprev))
1862 revs = list(orig.revs(startrev, stoprev))
1849 total = len(revs)
1863 total = len(revs)
1850 topic = 'adding'
1864 topic = 'adding'
1851 if runidx is not None:
1865 if runidx is not None:
1852 topic += ' (run #%d)' % runidx
1866 topic += ' (run #%d)' % runidx
1853 # Support both old and new progress API
1867 # Support both old and new progress API
1854 if util.safehasattr(ui, 'makeprogress'):
1868 if util.safehasattr(ui, 'makeprogress'):
1855 progress = ui.makeprogress(topic, unit='revs', total=total)
1869 progress = ui.makeprogress(topic, unit='revs', total=total)
1856 def updateprogress(pos):
1870 def updateprogress(pos):
1857 progress.update(pos)
1871 progress.update(pos)
1858 def completeprogress():
1872 def completeprogress():
1859 progress.complete()
1873 progress.complete()
1860 else:
1874 else:
1861 def updateprogress(pos):
1875 def updateprogress(pos):
1862 ui.progress(topic, pos, unit='revs', total=total)
1876 ui.progress(topic, pos, unit='revs', total=total)
1863 def completeprogress():
1877 def completeprogress():
1864 ui.progress(topic, None, unit='revs', total=total)
1878 ui.progress(topic, None, unit='revs', total=total)
1865
1879
1866 for idx, rev in enumerate(revs):
1880 for idx, rev in enumerate(revs):
1867 updateprogress(idx)
1881 updateprogress(idx)
1868 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1882 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1869 if clearcaches:
1883 if clearcaches:
1870 dest.index.clearcaches()
1884 dest.index.clearcaches()
1871 dest.clearcaches()
1885 dest.clearcaches()
1872 with timeone() as r:
1886 with timeone() as r:
1873 dest.addrawrevision(*addargs, **addkwargs)
1887 dest.addrawrevision(*addargs, **addkwargs)
1874 timings.append((rev, r[0]))
1888 timings.append((rev, r[0]))
1875 updateprogress(total)
1889 updateprogress(total)
1876 completeprogress()
1890 completeprogress()
1877 return timings
1891 return timings
1878
1892
1879 def _getrevisionseed(orig, rev, tr, source):
1893 def _getrevisionseed(orig, rev, tr, source):
1880 from mercurial.node import nullid
1894 from mercurial.node import nullid
1881
1895
1882 linkrev = orig.linkrev(rev)
1896 linkrev = orig.linkrev(rev)
1883 node = orig.node(rev)
1897 node = orig.node(rev)
1884 p1, p2 = orig.parents(node)
1898 p1, p2 = orig.parents(node)
1885 flags = orig.flags(rev)
1899 flags = orig.flags(rev)
1886 cachedelta = None
1900 cachedelta = None
1887 text = None
1901 text = None
1888
1902
1889 if source == b'full':
1903 if source == b'full':
1890 text = orig.revision(rev)
1904 text = orig.revision(rev)
1891 elif source == b'parent-1':
1905 elif source == b'parent-1':
1892 baserev = orig.rev(p1)
1906 baserev = orig.rev(p1)
1893 cachedelta = (baserev, orig.revdiff(p1, rev))
1907 cachedelta = (baserev, orig.revdiff(p1, rev))
1894 elif source == b'parent-2':
1908 elif source == b'parent-2':
1895 parent = p2
1909 parent = p2
1896 if p2 == nullid:
1910 if p2 == nullid:
1897 parent = p1
1911 parent = p1
1898 baserev = orig.rev(parent)
1912 baserev = orig.rev(parent)
1899 cachedelta = (baserev, orig.revdiff(parent, rev))
1913 cachedelta = (baserev, orig.revdiff(parent, rev))
1900 elif source == b'parent-smallest':
1914 elif source == b'parent-smallest':
1901 p1diff = orig.revdiff(p1, rev)
1915 p1diff = orig.revdiff(p1, rev)
1902 parent = p1
1916 parent = p1
1903 diff = p1diff
1917 diff = p1diff
1904 if p2 != nullid:
1918 if p2 != nullid:
1905 p2diff = orig.revdiff(p2, rev)
1919 p2diff = orig.revdiff(p2, rev)
1906 if len(p1diff) > len(p2diff):
1920 if len(p1diff) > len(p2diff):
1907 parent = p2
1921 parent = p2
1908 diff = p2diff
1922 diff = p2diff
1909 baserev = orig.rev(parent)
1923 baserev = orig.rev(parent)
1910 cachedelta = (baserev, diff)
1924 cachedelta = (baserev, diff)
1911 elif source == b'storage':
1925 elif source == b'storage':
1912 baserev = orig.deltaparent(rev)
1926 baserev = orig.deltaparent(rev)
1913 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1927 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1914
1928
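# assemble the positional and keyword arguments for revlog.addrawrevision():
# depending on 'source', either a fulltext or a (baserev, delta) cachedelta is
# provided, mimicking the different ways a revision can reach the revlog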
1915 return ((text, tr, linkrev, p1, p2),
1929 return ((text, tr, linkrev, p1, p2),
1916 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1930 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1917
1931
1918 @contextlib.contextmanager
1932 @contextlib.contextmanager
1919 def _temprevlog(ui, orig, truncaterev):
1933 def _temprevlog(ui, orig, truncaterev):
1920 from mercurial import vfs as vfsmod
1934 from mercurial import vfs as vfsmod
1921
1935
1922 if orig._inline:
1936 if orig._inline:
1923 raise error.Abort('not supporting inline revlog (yet)')
1937 raise error.Abort('not supporting inline revlog (yet)')
1924
1938
1925 origindexpath = orig.opener.join(orig.indexfile)
1939 origindexpath = orig.opener.join(orig.indexfile)
1926 origdatapath = orig.opener.join(orig.datafile)
1940 origdatapath = orig.opener.join(orig.datafile)
1927 indexname = 'revlog.i'
1941 indexname = 'revlog.i'
1928 dataname = 'revlog.d'
1942 dataname = 'revlog.d'
1929
1943
1930 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1944 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1931 try:
1945 try:
1932 # copy the data file in a temporary directory
1946 # copy the data file in a temporary directory
1933 ui.debug('copying data in %s\n' % tmpdir)
1947 ui.debug('copying data in %s\n' % tmpdir)
1934 destindexpath = os.path.join(tmpdir, 'revlog.i')
1948 destindexpath = os.path.join(tmpdir, 'revlog.i')
1935 destdatapath = os.path.join(tmpdir, 'revlog.d')
1949 destdatapath = os.path.join(tmpdir, 'revlog.d')
1936 shutil.copyfile(origindexpath, destindexpath)
1950 shutil.copyfile(origindexpath, destindexpath)
1937 shutil.copyfile(origdatapath, destdatapath)
1951 shutil.copyfile(origdatapath, destdatapath)
1938
1952
1939 # remove the data we want to add again
1953 # remove the data we want to add again
1940 ui.debug('truncating data to be rewritten\n')
1954 ui.debug('truncating data to be rewritten\n')
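# index entries have a fixed size (orig._io.size bytes), so the first
# 'truncaterev' entries end exactly at truncaterev * orig._io.size; the data
# file is cut at the start offset of that same revision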
1941 with open(destindexpath, 'ab') as index:
1955 with open(destindexpath, 'ab') as index:
1942 index.seek(0)
1956 index.seek(0)
1943 index.truncate(truncaterev * orig._io.size)
1957 index.truncate(truncaterev * orig._io.size)
1944 with open(destdatapath, 'ab') as data:
1958 with open(destdatapath, 'ab') as data:
1945 data.seek(0)
1959 data.seek(0)
1946 data.truncate(orig.start(truncaterev))
1960 data.truncate(orig.start(truncaterev))
1947
1961
1948 # instantiate a new revlog from the temporary copy
1962 # instantiate a new revlog from the temporary copy
1949 ui.debug('opening a new revlog on the truncated copy\n')
1963 ui.debug('opening a new revlog on the truncated copy\n')
1950 vfs = vfsmod.vfs(tmpdir)
1964 vfs = vfsmod.vfs(tmpdir)
1951 vfs.options = getattr(orig.opener, 'options', None)
1965 vfs.options = getattr(orig.opener, 'options', None)
1952
1966
1953 dest = revlog.revlog(vfs,
1967 dest = revlog.revlog(vfs,
1954 indexfile=indexname,
1968 indexfile=indexname,
1955 datafile=dataname)
1969 datafile=dataname)
1956 if dest._inline:
1970 if dest._inline:
1957 raise error.Abort('not supporting inline revlog (yet)')
1971 raise error.Abort('not supporting inline revlog (yet)')
1958 # make sure internals are initialized
1972 # make sure internals are initialized
1959 dest.revision(len(dest) - 1)
1973 dest.revision(len(dest) - 1)
1960 yield dest
1974 yield dest
1961 del dest, vfs
1975 del dest, vfs
1962 finally:
1976 finally:
1963 shutil.rmtree(tmpdir, True)
1977 shutil.rmtree(tmpdir, True)
1964
1978
1965 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1979 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1966 [(b'e', b'engines', b'', b'compression engines to use'),
1980 [(b'e', b'engines', b'', b'compression engines to use'),
1967 (b's', b'startrev', 0, b'revision to start at')],
1981 (b's', b'startrev', 0, b'revision to start at')],
1968 b'-c|-m|FILE')
1982 b'-c|-m|FILE')
1969 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1983 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1970 """Benchmark operations on revlog chunks.
1984 """Benchmark operations on revlog chunks.
1971
1985
1972 Logically, each revlog is a collection of fulltext revisions. However,
1986 Logically, each revlog is a collection of fulltext revisions. However,
1973 stored within each revlog are "chunks" of possibly compressed data. This
1987 stored within each revlog are "chunks" of possibly compressed data. This
1974 data needs to be read and decompressed or compressed and written.
1988 data needs to be read and decompressed or compressed and written.
1975
1989
1976 This command measures the time it takes to read+decompress and recompress
1990 This command measures the time it takes to read+decompress and recompress
1977 chunks in a revlog. It effectively isolates I/O and compression performance.
1991 chunks in a revlog. It effectively isolates I/O and compression performance.
1978 For measurements of higher-level operations like resolving revisions,
1992 For measurements of higher-level operations like resolving revisions,
1979 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1993 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
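
For instance (hypothetical invocations, assuming the zlib engine is available),
``hg perfrevlogchunks -c`` exercises the changelog, while
``hg perfrevlogchunks -e zlib FILE`` restricts the compression benchmark to the
zlib engine for a single filelog.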
1980 """
1994 """
1981 opts = _byteskwargs(opts)
1995 opts = _byteskwargs(opts)
1982
1996
1983 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1997 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1984
1998
1985 # _chunkraw was renamed to _getsegmentforrevs.
1999 # _chunkraw was renamed to _getsegmentforrevs.
1986 try:
2000 try:
1987 segmentforrevs = rl._getsegmentforrevs
2001 segmentforrevs = rl._getsegmentforrevs
1988 except AttributeError:
2002 except AttributeError:
1989 segmentforrevs = rl._chunkraw
2003 segmentforrevs = rl._chunkraw
1990
2004
1991 # Verify engines argument.
2005 # Verify engines argument.
1992 if engines:
2006 if engines:
1993 engines = set(e.strip() for e in engines.split(b','))
2007 engines = set(e.strip() for e in engines.split(b','))
1994 for engine in engines:
2008 for engine in engines:
1995 try:
2009 try:
1996 util.compressionengines[engine]
2010 util.compressionengines[engine]
1997 except KeyError:
2011 except KeyError:
1998 raise error.Abort(b'unknown compression engine: %s' % engine)
2012 raise error.Abort(b'unknown compression engine: %s' % engine)
1999 else:
2013 else:
2000 engines = []
2014 engines = []
2001 for e in util.compengines:
2015 for e in util.compengines:
2002 engine = util.compengines[e]
2016 engine = util.compengines[e]
2003 try:
2017 try:
2004 if engine.available():
2018 if engine.available():
2005 engine.revlogcompressor().compress(b'dummy')
2019 engine.revlogcompressor().compress(b'dummy')
2006 engines.append(e)
2020 engines.append(e)
2007 except NotImplementedError:
2021 except NotImplementedError:
2008 pass
2022 pass
2009
2023
2010 revs = list(rl.revs(startrev, len(rl) - 1))
2024 revs = list(rl.revs(startrev, len(rl) - 1))
2011
2025
2012 def rlfh(rl):
2026 def rlfh(rl):
2013 if rl._inline:
2027 if rl._inline:
2014 return getsvfs(repo)(rl.indexfile)
2028 return getsvfs(repo)(rl.indexfile)
2015 else:
2029 else:
2016 return getsvfs(repo)(rl.datafile)
2030 return getsvfs(repo)(rl.datafile)
2017
2031
2018 def doread():
2032 def doread():
2019 rl.clearcaches()
2033 rl.clearcaches()
2020 for rev in revs:
2034 for rev in revs:
2021 segmentforrevs(rev, rev)
2035 segmentforrevs(rev, rev)
2022
2036
2023 def doreadcachedfh():
2037 def doreadcachedfh():
2024 rl.clearcaches()
2038 rl.clearcaches()
2025 fh = rlfh(rl)
2039 fh = rlfh(rl)
2026 for rev in revs:
2040 for rev in revs:
2027 segmentforrevs(rev, rev, df=fh)
2041 segmentforrevs(rev, rev, df=fh)
2028
2042
2029 def doreadbatch():
2043 def doreadbatch():
2030 rl.clearcaches()
2044 rl.clearcaches()
2031 segmentforrevs(revs[0], revs[-1])
2045 segmentforrevs(revs[0], revs[-1])
2032
2046
2033 def doreadbatchcachedfh():
2047 def doreadbatchcachedfh():
2034 rl.clearcaches()
2048 rl.clearcaches()
2035 fh = rlfh(rl)
2049 fh = rlfh(rl)
2036 segmentforrevs(revs[0], revs[-1], df=fh)
2050 segmentforrevs(revs[0], revs[-1], df=fh)
2037
2051
2038 def dochunk():
2052 def dochunk():
2039 rl.clearcaches()
2053 rl.clearcaches()
2040 fh = rlfh(rl)
2054 fh = rlfh(rl)
2041 for rev in revs:
2055 for rev in revs:
2042 rl._chunk(rev, df=fh)
2056 rl._chunk(rev, df=fh)
2043
2057
2044 chunks = [None]
2058 chunks = [None]
2045
2059
2046 def dochunkbatch():
2060 def dochunkbatch():
2047 rl.clearcaches()
2061 rl.clearcaches()
2048 fh = rlfh(rl)
2062 fh = rlfh(rl)
2049 # Save chunks as a side-effect.
2063 # Save chunks as a side-effect.
2050 chunks[0] = rl._chunks(revs, df=fh)
2064 chunks[0] = rl._chunks(revs, df=fh)
2051
2065
2052 def docompress(compressor):
2066 def docompress(compressor):
2053 rl.clearcaches()
2067 rl.clearcaches()
2054
2068
2055 try:
2069 try:
2056 # Swap in the requested compression engine.
2070 # Swap in the requested compression engine.
2057 oldcompressor = rl._compressor
2071 oldcompressor = rl._compressor
2058 rl._compressor = compressor
2072 rl._compressor = compressor
2059 for chunk in chunks[0]:
2073 for chunk in chunks[0]:
2060 rl.compress(chunk)
2074 rl.compress(chunk)
2061 finally:
2075 finally:
2062 rl._compressor = oldcompressor
2076 rl._compressor = oldcompressor
2063
2077
2064 benches = [
2078 benches = [
2065 (lambda: doread(), b'read'),
2079 (lambda: doread(), b'read'),
2066 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2080 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2067 (lambda: doreadbatch(), b'read batch'),
2081 (lambda: doreadbatch(), b'read batch'),
2068 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2082 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2069 (lambda: dochunk(), b'chunk'),
2083 (lambda: dochunk(), b'chunk'),
2070 (lambda: dochunkbatch(), b'chunk batch'),
2084 (lambda: dochunkbatch(), b'chunk batch'),
2071 ]
2085 ]
2072
2086
2073 for engine in sorted(engines):
2087 for engine in sorted(engines):
2074 compressor = util.compengines[engine].revlogcompressor()
2088 compressor = util.compengines[engine].revlogcompressor()
2075 benches.append((functools.partial(docompress, compressor),
2089 benches.append((functools.partial(docompress, compressor),
2076 b'compress w/ %s' % engine))
2090 b'compress w/ %s' % engine))
2077
2091
2078 for fn, title in benches:
2092 for fn, title in benches:
2079 timer, fm = gettimer(ui, opts)
2093 timer, fm = gettimer(ui, opts)
2080 timer(fn, title=title)
2094 timer(fn, title=title)
2081 fm.end()
2095 fm.end()
2082
2096
2083 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2097 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2084 [(b'', b'cache', False, b'use caches instead of clearing')],
2098 [(b'', b'cache', False, b'use caches instead of clearing')],
2085 b'-c|-m|FILE REV')
2099 b'-c|-m|FILE REV')
2086 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2100 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2087 """Benchmark obtaining a revlog revision.
2101 """Benchmark obtaining a revlog revision.
2088
2102
2089 Obtaining a revlog revision consists of roughly the following steps:
2103 Obtaining a revlog revision consists of roughly the following steps:
2090
2104
2091 1. Compute the delta chain
2105 1. Compute the delta chain
2092 2. Slice the delta chain if applicable
2106 2. Slice the delta chain if applicable
2093 3. Obtain the raw chunks for that delta chain
2107 3. Obtain the raw chunks for that delta chain
2094 4. Decompress each raw chunk
2108 4. Decompress each raw chunk
2095 5. Apply binary patches to obtain fulltext
2109 5. Apply binary patches to obtain fulltext
2096 6. Verify hash of fulltext
2110 6. Verify hash of fulltext
2097
2111
2098 This command measures the time spent in each of these phases.
2112 This command measures the time spent in each of these phases.
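
For example, ``hg perfrevlogrevision -m 0`` (a hypothetical invocation) would
break down the cost of reconstructing the first manifest revision.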
2099 """
2113 """
2100 opts = _byteskwargs(opts)
2114 opts = _byteskwargs(opts)
2101
2115
2102 if opts.get(b'changelog') or opts.get(b'manifest'):
2116 if opts.get(b'changelog') or opts.get(b'manifest'):
2103 file_, rev = None, file_
2117 file_, rev = None, file_
2104 elif rev is None:
2118 elif rev is None:
2105 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2119 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2106
2120
2107 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2121 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2108
2122
2109 # _chunkraw was renamed to _getsegmentforrevs.
2123 # _chunkraw was renamed to _getsegmentforrevs.
2110 try:
2124 try:
2111 segmentforrevs = r._getsegmentforrevs
2125 segmentforrevs = r._getsegmentforrevs
2112 except AttributeError:
2126 except AttributeError:
2113 segmentforrevs = r._chunkraw
2127 segmentforrevs = r._chunkraw
2114
2128
2115 node = r.lookup(rev)
2129 node = r.lookup(rev)
2116 rev = r.rev(node)
2130 rev = r.rev(node)
2117
2131
2118 def getrawchunks(data, chain):
2132 def getrawchunks(data, chain):
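# split the pre-read segments back into one buffer per revision, using the
# index start/length offsets, so that decompression and patching can be timed
# independently of the read itself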
2119 start = r.start
2133 start = r.start
2120 length = r.length
2134 length = r.length
2121 inline = r._inline
2135 inline = r._inline
2122 iosize = r._io.size
2136 iosize = r._io.size
2123 buffer = util.buffer
2137 buffer = util.buffer
2124
2138
2125 chunks = []
2139 chunks = []
2126 ladd = chunks.append
2140 ladd = chunks.append
2127 for idx, item in enumerate(chain):
2141 for idx, item in enumerate(chain):
2128 offset = start(item[0])
2142 offset = start(item[0])
2129 bits = data[idx]
2143 bits = data[idx]
2130 for rev in item:
2144 for rev in item:
2131 chunkstart = start(rev)
2145 chunkstart = start(rev)
2132 if inline:
2146 if inline:
2133 chunkstart += (rev + 1) * iosize
2147 chunkstart += (rev + 1) * iosize
2134 chunklength = length(rev)
2148 chunklength = length(rev)
2135 ladd(buffer(bits, chunkstart - offset, chunklength))
2149 ladd(buffer(bits, chunkstart - offset, chunklength))
2136
2150
2137 return chunks
2151 return chunks
2138
2152
2139 def dodeltachain(rev):
2153 def dodeltachain(rev):
2140 if not cache:
2154 if not cache:
2141 r.clearcaches()
2155 r.clearcaches()
2142 r._deltachain(rev)
2156 r._deltachain(rev)
2143
2157
2144 def doread(chain):
2158 def doread(chain):
2145 if not cache:
2159 if not cache:
2146 r.clearcaches()
2160 r.clearcaches()
2147 for item in slicedchain:
2161 for item in slicedchain:
2148 segmentforrevs(item[0], item[-1])
2162 segmentforrevs(item[0], item[-1])
2149
2163
2150 def doslice(r, chain, size):
2164 def doslice(r, chain, size):
2151 for s in slicechunk(r, chain, targetsize=size):
2165 for s in slicechunk(r, chain, targetsize=size):
2152 pass
2166 pass
2153
2167
2154 def dorawchunks(data, chain):
2168 def dorawchunks(data, chain):
2155 if not cache:
2169 if not cache:
2156 r.clearcaches()
2170 r.clearcaches()
2157 getrawchunks(data, chain)
2171 getrawchunks(data, chain)
2158
2172
2159 def dodecompress(chunks):
2173 def dodecompress(chunks):
2160 decomp = r.decompress
2174 decomp = r.decompress
2161 for chunk in chunks:
2175 for chunk in chunks:
2162 decomp(chunk)
2176 decomp(chunk)
2163
2177
2164 def dopatch(text, bins):
2178 def dopatch(text, bins):
2165 if not cache:
2179 if not cache:
2166 r.clearcaches()
2180 r.clearcaches()
2167 mdiff.patches(text, bins)
2181 mdiff.patches(text, bins)
2168
2182
2169 def dohash(text):
2183 def dohash(text):
2170 if not cache:
2184 if not cache:
2171 r.clearcaches()
2185 r.clearcaches()
2172 r.checkhash(text, node, rev=rev)
2186 r.checkhash(text, node, rev=rev)
2173
2187
2174 def dorevision():
2188 def dorevision():
2175 if not cache:
2189 if not cache:
2176 r.clearcaches()
2190 r.clearcaches()
2177 r.revision(node)
2191 r.revision(node)
2178
2192
2179 try:
2193 try:
2180 from mercurial.revlogutils.deltas import slicechunk
2194 from mercurial.revlogutils.deltas import slicechunk
2181 except ImportError:
2195 except ImportError:
2182 slicechunk = getattr(revlog, '_slicechunk', None)
2196 slicechunk = getattr(revlog, '_slicechunk', None)
2183
2197
2184 size = r.length(rev)
2198 size = r.length(rev)
2185 chain = r._deltachain(rev)[0]
2199 chain = r._deltachain(rev)[0]
2186 if not getattr(r, '_withsparseread', False):
2200 if not getattr(r, '_withsparseread', False):
2187 slicedchain = (chain,)
2201 slicedchain = (chain,)
2188 else:
2202 else:
2189 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2203 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2190 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2204 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2191 rawchunks = getrawchunks(data, slicedchain)
2205 rawchunks = getrawchunks(data, slicedchain)
2192 bins = r._chunks(chain)
2206 bins = r._chunks(chain)
2193 text = bytes(bins[0])
2207 text = bytes(bins[0])
2194 bins = bins[1:]
2208 bins = bins[1:]
2195 text = mdiff.patches(text, bins)
2209 text = mdiff.patches(text, bins)
2196
2210
2197 benches = [
2211 benches = [
2198 (lambda: dorevision(), b'full'),
2212 (lambda: dorevision(), b'full'),
2199 (lambda: dodeltachain(rev), b'deltachain'),
2213 (lambda: dodeltachain(rev), b'deltachain'),
2200 (lambda: doread(chain), b'read'),
2214 (lambda: doread(chain), b'read'),
2201 ]
2215 ]
2202
2216
2203 if getattr(r, '_withsparseread', False):
2217 if getattr(r, '_withsparseread', False):
2204 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2218 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2205 benches.append(slicing)
2219 benches.append(slicing)
2206
2220
2207 benches.extend([
2221 benches.extend([
2208 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2222 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2209 (lambda: dodecompress(rawchunks), b'decompress'),
2223 (lambda: dodecompress(rawchunks), b'decompress'),
2210 (lambda: dopatch(text, bins), b'patch'),
2224 (lambda: dopatch(text, bins), b'patch'),
2211 (lambda: dohash(text), b'hash'),
2225 (lambda: dohash(text), b'hash'),
2212 ])
2226 ])
2213
2227
2214 timer, fm = gettimer(ui, opts)
2228 timer, fm = gettimer(ui, opts)
2215 for fn, title in benches:
2229 for fn, title in benches:
2216 timer(fn, title=title)
2230 timer(fn, title=title)
2217 fm.end()
2231 fm.end()
2218
2232
2219 @command(b'perfrevset',
2233 @command(b'perfrevset',
2220 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2234 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2221 (b'', b'contexts', False, b'obtain changectx for each revision')]
2235 (b'', b'contexts', False, b'obtain changectx for each revision')]
2222 + formatteropts, b"REVSET")
2236 + formatteropts, b"REVSET")
2223 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2237 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2224 """benchmark the execution time of a revset
2238 """benchmark the execution time of a revset
2225
2239
2226 Use the --clear option if you need to evaluate the impact of building the
2240 Use the --clear option if you need to evaluate the impact of building the
2227 volatile revision set caches on revset execution. The volatile caches hold
2241 volatile revision set caches on revset execution. The volatile caches hold
2228 data related to filtered and obsolete revisions."""
2242 data related to filtered and obsolete revisions."""
2229 opts = _byteskwargs(opts)
2243 opts = _byteskwargs(opts)
2230
2244
2231 timer, fm = gettimer(ui, opts)
2245 timer, fm = gettimer(ui, opts)
2232 def d():
2246 def d():
2233 if clear:
2247 if clear:
2234 repo.invalidatevolatilesets()
2248 repo.invalidatevolatilesets()
2235 if contexts:
2249 if contexts:
2236 for ctx in repo.set(expr): pass
2250 for ctx in repo.set(expr): pass
2237 else:
2251 else:
2238 for r in repo.revs(expr): pass
2252 for r in repo.revs(expr): pass
2239 timer(d)
2253 timer(d)
2240 fm.end()
2254 fm.end()
2241
2255
2242 @command(b'perfvolatilesets',
2256 @command(b'perfvolatilesets',
2243 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2257 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2244 ] + formatteropts)
2258 ] + formatteropts)
2245 def perfvolatilesets(ui, repo, *names, **opts):
2259 def perfvolatilesets(ui, repo, *names, **opts):
2246 """benchmark the computation of various volatile set
2260 """benchmark the computation of various volatile set
2247
2261
2248 Volatile set computes element related to filtering and obsolescence."""
2262 Volatile set computes element related to filtering and obsolescence."""
2249 opts = _byteskwargs(opts)
2263 opts = _byteskwargs(opts)
2250 timer, fm = gettimer(ui, opts)
2264 timer, fm = gettimer(ui, opts)
2251 repo = repo.unfiltered()
2265 repo = repo.unfiltered()
2252
2266
2253 def getobs(name):
2267 def getobs(name):
2254 def d():
2268 def d():
2255 repo.invalidatevolatilesets()
2269 repo.invalidatevolatilesets()
2256 if opts[b'clear_obsstore']:
2270 if opts[b'clear_obsstore']:
2257 clearfilecache(repo, b'obsstore')
2271 clearfilecache(repo, b'obsstore')
2258 obsolete.getrevs(repo, name)
2272 obsolete.getrevs(repo, name)
2259 return d
2273 return d
2260
2274
2261 allobs = sorted(obsolete.cachefuncs)
2275 allobs = sorted(obsolete.cachefuncs)
2262 if names:
2276 if names:
2263 allobs = [n for n in allobs if n in names]
2277 allobs = [n for n in allobs if n in names]
2264
2278
2265 for name in allobs:
2279 for name in allobs:
2266 timer(getobs(name), title=name)
2280 timer(getobs(name), title=name)
2267
2281
2268 def getfiltered(name):
2282 def getfiltered(name):
2269 def d():
2283 def d():
2270 repo.invalidatevolatilesets()
2284 repo.invalidatevolatilesets()
2271 if opts[b'clear_obsstore']:
2285 if opts[b'clear_obsstore']:
2272 clearfilecache(repo, b'obsstore')
2286 clearfilecache(repo, b'obsstore')
2273 repoview.filterrevs(repo, name)
2287 repoview.filterrevs(repo, name)
2274 return d
2288 return d
2275
2289
2276 allfilter = sorted(repoview.filtertable)
2290 allfilter = sorted(repoview.filtertable)
2277 if names:
2291 if names:
2278 allfilter = [n for n in allfilter if n in names]
2292 allfilter = [n for n in allfilter if n in names]
2279
2293
2280 for name in allfilter:
2294 for name in allfilter:
2281 timer(getfiltered(name), title=name)
2295 timer(getfiltered(name), title=name)
2282 fm.end()
2296 fm.end()
2283
2297
2284 @command(b'perfbranchmap',
2298 @command(b'perfbranchmap',
2285 [(b'f', b'full', False,
2299 [(b'f', b'full', False,
2286 b'Includes build time of subset'),
2300 b'Includes build time of subset'),
2287 (b'', b'clear-revbranch', False,
2301 (b'', b'clear-revbranch', False,
2288 b'purge the revbranch cache between computation'),
2302 b'purge the revbranch cache between computation'),
2289 ] + formatteropts)
2303 ] + formatteropts)
2290 def perfbranchmap(ui, repo, *filternames, **opts):
2304 def perfbranchmap(ui, repo, *filternames, **opts):
2291 """benchmark the update of a branchmap
2305 """benchmark the update of a branchmap
2292
2306
2293 This benchmarks the full repo.branchmap() call with read and write disabled
2307 This benchmarks the full repo.branchmap() call with read and write disabled
2294 """
2308 """
2295 opts = _byteskwargs(opts)
2309 opts = _byteskwargs(opts)
2296 full = opts.get(b"full", False)
2310 full = opts.get(b"full", False)
2297 clear_revbranch = opts.get(b"clear_revbranch", False)
2311 clear_revbranch = opts.get(b"clear_revbranch", False)
2298 timer, fm = gettimer(ui, opts)
2312 timer, fm = gettimer(ui, opts)
2299 def getbranchmap(filtername):
2313 def getbranchmap(filtername):
2300 """generate a benchmark function for the filtername"""
2314 """generate a benchmark function for the filtername"""
2301 if filtername is None:
2315 if filtername is None:
2302 view = repo
2316 view = repo
2303 else:
2317 else:
2304 view = repo.filtered(filtername)
2318 view = repo.filtered(filtername)
2305 def d():
2319 def d():
2306 if clear_revbranch:
2320 if clear_revbranch:
2307 repo.revbranchcache()._clear()
2321 repo.revbranchcache()._clear()
2308 if full:
2322 if full:
2309 view._branchcaches.clear()
2323 view._branchcaches.clear()
2310 else:
2324 else:
2311 view._branchcaches.pop(filtername, None)
2325 view._branchcaches.pop(filtername, None)
2312 view.branchmap()
2326 view.branchmap()
2313 return d
2327 return d
2314 # add filter in smaller subset to bigger subset
2328 # add filter in smaller subset to bigger subset
2315 possiblefilters = set(repoview.filtertable)
2329 possiblefilters = set(repoview.filtertable)
2316 if filternames:
2330 if filternames:
2317 possiblefilters &= set(filternames)
2331 possiblefilters &= set(filternames)
2318 subsettable = getbranchmapsubsettable()
2332 subsettable = getbranchmapsubsettable()
2319 allfilters = []
2333 allfilters = []
2320 while possiblefilters:
2334 while possiblefilters:
2321 for name in possiblefilters:
2335 for name in possiblefilters:
2322 subset = subsettable.get(name)
2336 subset = subsettable.get(name)
2323 if subset not in possiblefilters:
2337 if subset not in possiblefilters:
2324 break
2338 break
2325 else:
2339 else:
2326 assert False, b'subset cycle %s!' % possiblefilters
2340 assert False, b'subset cycle %s!' % possiblefilters
2327 allfilters.append(name)
2341 allfilters.append(name)
2328 possiblefilters.remove(name)
2342 possiblefilters.remove(name)
2329
2343
2330 # warm the cache
2344 # warm the cache
2331 if not full:
2345 if not full:
2332 for name in allfilters:
2346 for name in allfilters:
2333 repo.filtered(name).branchmap()
2347 repo.filtered(name).branchmap()
2334 if not filternames or b'unfiltered' in filternames:
2348 if not filternames or b'unfiltered' in filternames:
2335 # add unfiltered
2349 # add unfiltered
2336 allfilters.append(None)
2350 allfilters.append(None)
2337
2351
2338 branchcacheread = safeattrsetter(branchmap, b'read')
2352 branchcacheread = safeattrsetter(branchmap, b'read')
2339 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2353 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2340 branchcacheread.set(lambda repo: None)
2354 branchcacheread.set(lambda repo: None)
2341 branchcachewrite.set(lambda bc, repo: None)
2355 branchcachewrite.set(lambda bc, repo: None)
2342 try:
2356 try:
2343 for name in allfilters:
2357 for name in allfilters:
2344 printname = name
2358 printname = name
2345 if name is None:
2359 if name is None:
2346 printname = b'unfiltered'
2360 printname = b'unfiltered'
2347 timer(getbranchmap(name), title=str(printname))
2361 timer(getbranchmap(name), title=str(printname))
2348 finally:
2362 finally:
2349 branchcacheread.restore()
2363 branchcacheread.restore()
2350 branchcachewrite.restore()
2364 branchcachewrite.restore()
2351 fm.end()
2365 fm.end()
2352
2366
2353 @command(b'perfbranchmapupdate', [
2367 @command(b'perfbranchmapupdate', [
2354 (b'', b'base', [], b'subset of revision to start from'),
2368 (b'', b'base', [], b'subset of revision to start from'),
2355 (b'', b'target', [], b'subset of revision to end with'),
2369 (b'', b'target', [], b'subset of revision to end with'),
2356 (b'', b'clear-caches', False, b'clear cache between each run')
2370 (b'', b'clear-caches', False, b'clear cache between each run')
2357 ] + formatteropts)
2371 ] + formatteropts)
2358 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2372 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2359 """benchmark branchmap update from for <base> revs to <target> revs
2373 """benchmark branchmap update from for <base> revs to <target> revs
2360
2374
2361 If `--clear-caches` is passed, the following items will be reset before
2375 If `--clear-caches` is passed, the following items will be reset before
2362 each update:
2376 each update:
2363 * the changelog instance and associated indexes
2377 * the changelog instance and associated indexes
2364 * the rev-branch-cache instance
2378 * the rev-branch-cache instance
2365
2379
2366 Examples:
2380 Examples:
2367
2381
2368 # update for the one last revision
2382 # update for the one last revision
2369 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2383 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2370
2384
2371 # update for a change coming with a new branch
2385 # update for a change coming with a new branch
2372 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2386 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2373 """
2387 """
2374 from mercurial import branchmap
2388 from mercurial import branchmap
2375 from mercurial import repoview
2389 from mercurial import repoview
2376 opts = _byteskwargs(opts)
2390 opts = _byteskwargs(opts)
2377 timer, fm = gettimer(ui, opts)
2391 timer, fm = gettimer(ui, opts)
2378 clearcaches = opts[b'clear_caches']
2392 clearcaches = opts[b'clear_caches']
2379 unfi = repo.unfiltered()
2393 unfi = repo.unfiltered()
2380 x = [None] # used to pass data between closure
2394 x = [None] # used to pass data between closure
2381
2395
2382 # we use a `list` here to avoid possible side effect from smartset
2396 # we use a `list` here to avoid possible side effect from smartset
2383 baserevs = list(scmutil.revrange(repo, base))
2397 baserevs = list(scmutil.revrange(repo, base))
2384 targetrevs = list(scmutil.revrange(repo, target))
2398 targetrevs = list(scmutil.revrange(repo, target))
2385 if not baserevs:
2399 if not baserevs:
2386 raise error.Abort(b'no revisions selected for --base')
2400 raise error.Abort(b'no revisions selected for --base')
2387 if not targetrevs:
2401 if not targetrevs:
2388 raise error.Abort(b'no revisions selected for --target')
2402 raise error.Abort(b'no revisions selected for --target')
2389
2403
2390 # make sure the target branchmap also contains the one in the base
2404 # make sure the target branchmap also contains the one in the base
2391 targetrevs = list(set(baserevs) | set(targetrevs))
2405 targetrevs = list(set(baserevs) | set(targetrevs))
2392 targetrevs.sort()
2406 targetrevs.sort()
2393
2407
2394 cl = repo.changelog
2408 cl = repo.changelog
2395 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2409 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2396 allbaserevs.sort()
2410 allbaserevs.sort()
2397 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2411 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2398
2412
2399 newrevs = list(alltargetrevs.difference(allbaserevs))
2413 newrevs = list(alltargetrevs.difference(allbaserevs))
2400 newrevs.sort()
2414 newrevs.sort()
2401
2415
2402 allrevs = frozenset(unfi.changelog.revs())
2416 allrevs = frozenset(unfi.changelog.revs())
2403 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2417 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2404 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2418 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2405
2419
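# repoview filter functions return the set of revisions to hide; hiding
# everything outside the base/target ancestor sets makes the repository look
# as it would before and after the update being benchmarked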
2406 def basefilter(repo, visibilityexceptions=None):
2420 def basefilter(repo, visibilityexceptions=None):
2407 return basefilterrevs
2421 return basefilterrevs
2408
2422
2409 def targetfilter(repo, visibilityexceptions=None):
2423 def targetfilter(repo, visibilityexceptions=None):
2410 return targetfilterrevs
2424 return targetfilterrevs
2411
2425
2412 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2426 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2413 ui.status(msg % (len(allbaserevs), len(newrevs)))
2427 ui.status(msg % (len(allbaserevs), len(newrevs)))
2414 if targetfilterrevs:
2428 if targetfilterrevs:
2415 msg = b'(%d revisions still filtered)\n'
2429 msg = b'(%d revisions still filtered)\n'
2416 ui.status(msg % len(targetfilterrevs))
2430 ui.status(msg % len(targetfilterrevs))
2417
2431
2418 try:
2432 try:
2419 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2433 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2420 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2434 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2421
2435
2422 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2436 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2423 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2437 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2424
2438
2425 # try to find an existing branchmap to reuse
2439 # try to find an existing branchmap to reuse
2426 subsettable = getbranchmapsubsettable()
2440 subsettable = getbranchmapsubsettable()
2427 candidatefilter = subsettable.get(None)
2441 candidatefilter = subsettable.get(None)
2428 while candidatefilter is not None:
2442 while candidatefilter is not None:
2429 candidatebm = repo.filtered(candidatefilter).branchmap()
2443 candidatebm = repo.filtered(candidatefilter).branchmap()
2430 if candidatebm.validfor(baserepo):
2444 if candidatebm.validfor(baserepo):
2431 filtered = repoview.filterrevs(repo, candidatefilter)
2445 filtered = repoview.filterrevs(repo, candidatefilter)
2432 missing = [r for r in allbaserevs if r in filtered]
2446 missing = [r for r in allbaserevs if r in filtered]
2433 base = candidatebm.copy()
2447 base = candidatebm.copy()
2434 base.update(baserepo, missing)
2448 base.update(baserepo, missing)
2435 break
2449 break
2436 candidatefilter = subsettable.get(candidatefilter)
2450 candidatefilter = subsettable.get(candidatefilter)
2437 else:
2451 else:
2438 # no suitable subset was found
2452 # no suitable subset was found
2439 base = branchmap.branchcache()
2453 base = branchmap.branchcache()
2440 base.update(baserepo, allbaserevs)
2454 base.update(baserepo, allbaserevs)
2441
2455
2442 def setup():
2456 def setup():
2443 x[0] = base.copy()
2457 x[0] = base.copy()
2444 if clearcaches:
2458 if clearcaches:
2445 unfi._revbranchcache = None
2459 unfi._revbranchcache = None
2446 clearchangelog(repo)
2460 clearchangelog(repo)
2447
2461
2448 def bench():
2462 def bench():
2449 x[0].update(targetrepo, newrevs)
2463 x[0].update(targetrepo, newrevs)
2450
2464
2451 timer(bench, setup=setup)
2465 timer(bench, setup=setup)
2452 fm.end()
2466 fm.end()
2453 finally:
2467 finally:
2454 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2468 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2455 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2469 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2456
2470
2457 @command(b'perfbranchmapload', [
2471 @command(b'perfbranchmapload', [
2458 (b'f', b'filter', b'', b'Specify repoview filter'),
2472 (b'f', b'filter', b'', b'Specify repoview filter'),
2459 (b'', b'list', False, b'List branchmap filter caches'),
2473 (b'', b'list', False, b'List branchmap filter caches'),
2460 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2474 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2461
2475
2462 ] + formatteropts)
2476 ] + formatteropts)
2463 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2477 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2464 """benchmark reading the branchmap"""
2478 """benchmark reading the branchmap"""
2465 opts = _byteskwargs(opts)
2479 opts = _byteskwargs(opts)
2466 clearrevlogs = opts[b'clear_revlogs']
2480 clearrevlogs = opts[b'clear_revlogs']
2467
2481
2468 if list:
2482 if list:
2469 for name, kind, st in repo.cachevfs.readdir(stat=True):
2483 for name, kind, st in repo.cachevfs.readdir(stat=True):
2470 if name.startswith(b'branch2'):
2484 if name.startswith(b'branch2'):
2471 filtername = name.partition(b'-')[2] or b'unfiltered'
2485 filtername = name.partition(b'-')[2] or b'unfiltered'
2472 ui.status(b'%s - %s\n'
2486 ui.status(b'%s - %s\n'
2473 % (filtername, util.bytecount(st.st_size)))
2487 % (filtername, util.bytecount(st.st_size)))
2474 return
2488 return
2475 if not filter:
2489 if not filter:
2476 filter = None
2490 filter = None
2477 subsettable = getbranchmapsubsettable()
2491 subsettable = getbranchmapsubsettable()
2478 if filter is None:
2492 if filter is None:
2479 repo = repo.unfiltered()
2493 repo = repo.unfiltered()
2480 else:
2494 else:
2481 repo = repoview.repoview(repo, filter)
2495 repo = repoview.repoview(repo, filter)
2482
2496
2483 repo.branchmap() # make sure we have a relevant, up to date branchmap
2497 repo.branchmap() # make sure we have a relevant, up to date branchmap
2484
2498
2485 currentfilter = filter
2499 currentfilter = filter
2486 # try once without timer, the filter may not be cached
2500 # try once without timer, the filter may not be cached
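# if no branchmap is cached on disk for this filter level, fall back to
# progressively larger subsets until one is found, or abort below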
2487 while branchmap.read(repo) is None:
2501 while branchmap.read(repo) is None:
2488 currentfilter = subsettable.get(currentfilter)
2502 currentfilter = subsettable.get(currentfilter)
2489 if currentfilter is None:
2503 if currentfilter is None:
2490 raise error.Abort(b'No branchmap cached for %s repo'
2504 raise error.Abort(b'No branchmap cached for %s repo'
2491 % (filter or b'unfiltered'))
2505 % (filter or b'unfiltered'))
2492 repo = repo.filtered(currentfilter)
2506 repo = repo.filtered(currentfilter)
2493 timer, fm = gettimer(ui, opts)
2507 timer, fm = gettimer(ui, opts)
2494 def setup():
2508 def setup():
2495 if clearrevlogs:
2509 if clearrevlogs:
2496 clearchangelog(repo)
2510 clearchangelog(repo)
2497 def bench():
2511 def bench():
2498 branchmap.read(repo)
2512 branchmap.read(repo)
2499 timer(bench, setup=setup)
2513 timer(bench, setup=setup)
2500 fm.end()
2514 fm.end()
2501
2515
2502 @command(b'perfloadmarkers')
2516 @command(b'perfloadmarkers')
2503 def perfloadmarkers(ui, repo):
2517 def perfloadmarkers(ui, repo):
2504 """benchmark the time to parse the on-disk markers for a repo
2518 """benchmark the time to parse the on-disk markers for a repo
2505
2519
2506 Result is the number of markers in the repo."""
2520 Result is the number of markers in the repo."""
2507 timer, fm = gettimer(ui)
2521 timer, fm = gettimer(ui)
2508 svfs = getsvfs(repo)
2522 svfs = getsvfs(repo)
2509 timer(lambda: len(obsolete.obsstore(svfs)))
2523 timer(lambda: len(obsolete.obsstore(svfs)))
2510 fm.end()
2524 fm.end()
2511
2525
2512 @command(b'perflrucachedict', formatteropts +
2526 @command(b'perflrucachedict', formatteropts +
2513 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2527 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2514 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2528 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2515 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2529 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2516 (b'', b'size', 4, b'size of cache'),
2530 (b'', b'size', 4, b'size of cache'),
2517 (b'', b'gets', 10000, b'number of key lookups'),
2531 (b'', b'gets', 10000, b'number of key lookups'),
2518 (b'', b'sets', 10000, b'number of key sets'),
2532 (b'', b'sets', 10000, b'number of key sets'),
2519 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2533 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2520 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2534 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2521 norepo=True)
2535 norepo=True)
2522 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2536 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2523 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2537 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2524 opts = _byteskwargs(opts)
2538 opts = _byteskwargs(opts)
2525
2539
2526 def doinit():
2540 def doinit():
2527 for i in _xrange(10000):
2541 for i in _xrange(10000):
2528 util.lrucachedict(size)
2542 util.lrucachedict(size)
2529
2543
2530 costrange = list(range(mincost, maxcost + 1))
2544 costrange = list(range(mincost, maxcost + 1))
2531
2545
2532 values = []
2546 values = []
2533 for i in _xrange(size):
2547 for i in _xrange(size):
2534 values.append(random.randint(0, _maxint))
2548 values.append(random.randint(0, _maxint))
2535
2549
2536 # Get mode fills the cache and tests raw lookup performance with no
2550 # Get mode fills the cache and tests raw lookup performance with no
2537 # eviction.
2551 # eviction.
2538 getseq = []
2552 getseq = []
2539 for i in _xrange(gets):
2553 for i in _xrange(gets):
2540 getseq.append(random.choice(values))
2554 getseq.append(random.choice(values))
2541
2555
2542 def dogets():
2556 def dogets():
2543 d = util.lrucachedict(size)
2557 d = util.lrucachedict(size)
2544 for v in values:
2558 for v in values:
2545 d[v] = v
2559 d[v] = v
2546 for key in getseq:
2560 for key in getseq:
2547 value = d[key]
2561 value = d[key]
2548 value # silence pyflakes warning
2562 value # silence pyflakes warning
2549
2563
2550 def dogetscost():
2564 def dogetscost():
2551 d = util.lrucachedict(size, maxcost=costlimit)
2565 d = util.lrucachedict(size, maxcost=costlimit)
2552 for i, v in enumerate(values):
2566 for i, v in enumerate(values):
2553 d.insert(v, v, cost=costs[i])
2567 d.insert(v, v, cost=costs[i])
2554 for key in getseq:
2568 for key in getseq:
2555 try:
2569 try:
2556 value = d[key]
2570 value = d[key]
2557 value # silence pyflakes warning
2571 value # silence pyflakes warning
2558 except KeyError:
2572 except KeyError:
2559 pass
2573 pass
2560
2574
2561 # Set mode tests insertion speed with cache eviction.
2575 # Set mode tests insertion speed with cache eviction.
2562 setseq = []
2576 setseq = []
2563 costs = []
2577 costs = []
2564 for i in _xrange(sets):
2578 for i in _xrange(sets):
2565 setseq.append(random.randint(0, _maxint))
2579 setseq.append(random.randint(0, _maxint))
2566 costs.append(random.choice(costrange))
2580 costs.append(random.choice(costrange))
2567
2581
2568 def doinserts():
2582 def doinserts():
2569 d = util.lrucachedict(size)
2583 d = util.lrucachedict(size)
2570 for v in setseq:
2584 for v in setseq:
2571 d.insert(v, v)
2585 d.insert(v, v)
2572
2586
2573 def doinsertscost():
2587 def doinsertscost():
2574 d = util.lrucachedict(size, maxcost=costlimit)
2588 d = util.lrucachedict(size, maxcost=costlimit)
2575 for i, v in enumerate(setseq):
2589 for i, v in enumerate(setseq):
2576 d.insert(v, v, cost=costs[i])
2590 d.insert(v, v, cost=costs[i])
2577
2591
2578 def dosets():
2592 def dosets():
2579 d = util.lrucachedict(size)
2593 d = util.lrucachedict(size)
2580 for v in setseq:
2594 for v in setseq:
2581 d[v] = v
2595 d[v] = v
2582
2596
2583 # Mixed mode randomly performs gets and sets with eviction.
2597 # Mixed mode randomly performs gets and sets with eviction.
2584 mixedops = []
2598 mixedops = []
2585 for i in _xrange(mixed):
2599 for i in _xrange(mixed):
2586 r = random.randint(0, 100)
2600 r = random.randint(0, 100)
2587 if r < mixedgetfreq:
2601 if r < mixedgetfreq:
2588 op = 0
2602 op = 0
2589 else:
2603 else:
2590 op = 1
2604 op = 1
2591
2605
2592 mixedops.append((op,
2606 mixedops.append((op,
2593 random.randint(0, size * 2),
2607 random.randint(0, size * 2),
2594 random.choice(costrange)))
2608 random.choice(costrange)))
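# each entry is (op, key, cost): op == 0 performs a lookup, op == 1 a set or
# insert; keys span twice the cache size, so some lookups are expected to miss
# and exercise the KeyError path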
2595
2609
2596 def domixed():
2610 def domixed():
2597 d = util.lrucachedict(size)
2611 d = util.lrucachedict(size)
2598
2612
2599 for op, v, cost in mixedops:
2613 for op, v, cost in mixedops:
2600 if op == 0:
2614 if op == 0:
2601 try:
2615 try:
2602 d[v]
2616 d[v]
2603 except KeyError:
2617 except KeyError:
2604 pass
2618 pass
2605 else:
2619 else:
2606 d[v] = v
2620 d[v] = v
2607
2621
2608 def domixedcost():
2622 def domixedcost():
2609 d = util.lrucachedict(size, maxcost=costlimit)
2623 d = util.lrucachedict(size, maxcost=costlimit)
2610
2624
2611 for op, v, cost in mixedops:
2625 for op, v, cost in mixedops:
2612 if op == 0:
2626 if op == 0:
2613 try:
2627 try:
2614 d[v]
2628 d[v]
2615 except KeyError:
2629 except KeyError:
2616 pass
2630 pass
2617 else:
2631 else:
2618 d.insert(v, v, cost=cost)
2632 d.insert(v, v, cost=cost)
2619
2633
2620 benches = [
2634 benches = [
2621 (doinit, b'init'),
2635 (doinit, b'init'),
2622 ]
2636 ]
2623
2637
2624 if costlimit:
2638 if costlimit:
2625 benches.extend([
2639 benches.extend([
2626 (dogetscost, b'gets w/ cost limit'),
2640 (dogetscost, b'gets w/ cost limit'),
2627 (doinsertscost, b'inserts w/ cost limit'),
2641 (doinsertscost, b'inserts w/ cost limit'),
2628 (domixedcost, b'mixed w/ cost limit'),
2642 (domixedcost, b'mixed w/ cost limit'),
2629 ])
2643 ])
2630 else:
2644 else:
2631 benches.extend([
2645 benches.extend([
2632 (dogets, b'gets'),
2646 (dogets, b'gets'),
2633 (doinserts, b'inserts'),
2647 (doinserts, b'inserts'),
2634 (dosets, b'sets'),
2648 (dosets, b'sets'),
2635 (domixed, b'mixed')
2649 (domixed, b'mixed')
2636 ])
2650 ])
2637
2651
2638 for fn, title in benches:
2652 for fn, title in benches:
2639 timer, fm = gettimer(ui, opts)
2653 timer, fm = gettimer(ui, opts)
2640 timer(fn, title=title)
2654 timer(fn, title=title)
2641 fm.end()
2655 fm.end()
2642
2656
2643 @command(b'perfwrite', formatteropts)
2657 @command(b'perfwrite', formatteropts)
2644 def perfwrite(ui, repo, **opts):
2658 def perfwrite(ui, repo, **opts):
2645 """microbenchmark ui.write
2659 """microbenchmark ui.write
2646 """
2660 """
2647 opts = _byteskwargs(opts)
2661 opts = _byteskwargs(opts)
2648
2662
2649 timer, fm = gettimer(ui, opts)
2663 timer, fm = gettimer(ui, opts)
2650 def write():
2664 def write():
2651 for i in range(100000):
2665 for i in range(100000):
2652 ui.write((b'Testing write performance\n'))
2666 ui.write((b'Testing write performance\n'))
2653 timer(write)
2667 timer(write)
2654 fm.end()
2668 fm.end()
2655
2669
2656 def uisetup(ui):
2670 def uisetup(ui):
2657 if (util.safehasattr(cmdutil, b'openrevlog') and
2671 if (util.safehasattr(cmdutil, b'openrevlog') and
2658 not util.safehasattr(commands, b'debugrevlogopts')):
2672 not util.safehasattr(commands, b'debugrevlogopts')):
2659 # for "historical portability":
2673 # for "historical portability":
2660 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2674 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2661 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2675 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2662 # openrevlog() should cause failure, because it has been
2676 # openrevlog() should cause failure, because it has been
2663 # available since 3.5 (or 49c583ca48c4).
2677 # available since 3.5 (or 49c583ca48c4).
2664 def openrevlog(orig, repo, cmd, file_, opts):
2678 def openrevlog(orig, repo, cmd, file_, opts):
2665 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2679 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2666 raise error.Abort(b"This version doesn't support --dir option",
2680 raise error.Abort(b"This version doesn't support --dir option",
2667 hint=b"use 3.5 or later")
2681 hint=b"use 3.5 or later")
2668 return orig(repo, cmd, file_, opts)
2682 return orig(repo, cmd, file_, opts)
2669 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2683 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2670
2684
2671 @command(b'perfprogress', formatteropts + [
2685 @command(b'perfprogress', formatteropts + [
2672 (b'', b'topic', b'topic', b'topic for progress messages'),
2686 (b'', b'topic', b'topic', b'topic for progress messages'),
2673 (b'c', b'total', 1000000, b'total value we are progressing to'),
2687 (b'c', b'total', 1000000, b'total value we are progressing to'),
2674 ], norepo=True)
2688 ], norepo=True)
2675 def perfprogress(ui, topic=None, total=None, **opts):
2689 def perfprogress(ui, topic=None, total=None, **opts):
2676 """printing of progress bars"""
2690 """printing of progress bars"""
2677 opts = _byteskwargs(opts)
2691 opts = _byteskwargs(opts)
2678
2692
2679 timer, fm = gettimer(ui, opts)
2693 timer, fm = gettimer(ui, opts)
2680
2694
2681 def doprogress():
2695 def doprogress():
2682 with ui.makeprogress(topic, total=total) as progress:
2696 with ui.makeprogress(topic, total=total) as progress:
2683 for i in pycompat.xrange(total):
2697 for i in pycompat.xrange(total):
2684 progress.increment()
2698 progress.increment()
2685
2699
2686 timer(doprogress)
2700 timer(doprogress)
2687 fm.end()
2701 fm.end()