perf: add --clear-revlog flag to branchmapload...
Boris Feld
r40738:4240a1da default
@@ -1,2428 +1,2434
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
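
# Editorial sketch (not part of upstream perf.py): the guarded imports above
# leave each optional module either bound in this namespace or absent, so code
# that runs later probes for it instead of assuming the import succeeded. The
# helper name '_hasbranchmapmod' is hypothetical, shown only to illustrate the
# pattern:
#
#   def _hasbranchmapmod():
#       return 'branchmap' in globals()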

def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time
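
# Editorial sketch (not part of upstream perf.py): whichever clock was bound to
# util.timer above is what the timing helpers below rely on. Timing a single
# call portably looks like this; 'payload' is a hypothetical callable:
#
#   start = util.timer()
#   payload()
#   elapsed = util.timer() - start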

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        (b'c', b'changelog', False, (b'open changelog')),
        (b'm', b'manifest', False, (b'open manifest')),
        (b'', b'dir', False, (b'open directory manifest')),
        ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
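
# Editorial sketch (not part of upstream perf.py): whichever '@command' variant
# was selected above acts as a decorator that records each perf command in
# 'cmdtable'. For the local fallback defined just above, a decorated function
# ends up stored roughly as:
#
#   cmdtable[name] = (func, list(options), synopsis)   # synopsis omitted if None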

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm
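
# Editorial sketch (not part of upstream perf.py): the perf commands below all
# follow the same pattern around gettimer(). 'd' stands for whatever operation
# is being measured:
#
#   timer, fm = gettimer(ui, opts)
#   timer(d)        # or timer(d, setup=s) to reset caches before each run
#   fm.end()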

def stub_timer(fm, func, setup=None, title=None):
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))

def _timer(fm, func, setup=None, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
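
# Editorial note (not part of upstream perf.py): the loop above keeps calling
# func() until either 3 seconds have passed with at least 100 runs, or 10
# seconds have passed with at least 3 runs. For example, a ~50ms operation is
# sampled about 100 times (~5s total), while a ~5s operation stops after 3 runs.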

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
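
# Editorial sketch (not part of upstream perf.py): with the default plain
# formatter, each display() call above emits one line per role, e.g.:
#
#   ! wall 0.001234 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)
#
# and with perf.all-timing enabled, 'max', 'avg' and 'median' lines follow the
# 'best' line.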

# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
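
# Editorial sketch (not part of upstream perf.py): safeattrsetter() is used like
# this elsewhere in the file (see gettimer() above); restore() puts the original
# value back when a benchmark wants to undo its change:
#
#   uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)
#       # ... later: uifout.restore()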

# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
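
# Editorial sketch (not part of upstream perf.py): these cache-clearing helpers
# are typically wired into a timer setup function so every sample starts from a
# cold cache, as perfbookmarks() below does:
#
#   def s():
#       clearfilecache(repo, b'_bookmarks')
#   timer(d, setup=s)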

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
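
# Editorial note (not part of upstream perf.py): 'hg perfstatus' times
# repo.status() on the working copy; adding --unknown also makes each sample
# scan for unknown files, which can be the expensive part on large working
# directories.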

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perftags', formatteropts)
def perftags(ui, repo, **opts):
    import mercurial.changelog
    import mercurial.manifest

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def s():
        repo.changelog = mercurial.changelog.changelog(svfs)
        rootmanifest = mercurial.manifest.manifestrevlog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
                                                          rootmanifest)
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfbookmarks', formatteropts)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def s():
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
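
# Editorial sketch (not part of upstream perf.py): each (fn, title) pair above
# gets its own timer/formatter, so a single invocation prints one timing block
# per strategy, e.g.:
#
#   $ hg perfbundleread bundle.hg
#
# where 'bundle.hg' is a hypothetical bundle produced beforehand with
# 'hg bundle --all bundle.hg'.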
713
713
714 @command(b'perfchangegroupchangelog', formatteropts +
714 @command(b'perfchangegroupchangelog', formatteropts +
715 [(b'', b'version', b'02', b'changegroup version'),
715 [(b'', b'version', b'02', b'changegroup version'),
716 (b'r', b'rev', b'', b'revisions to add to changegroup')])
716 (b'r', b'rev', b'', b'revisions to add to changegroup')])
717 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
717 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
718 """Benchmark producing a changelog group for a changegroup.
718 """Benchmark producing a changelog group for a changegroup.
719
719
720 This measures the time spent processing the changelog during a
720 This measures the time spent processing the changelog during a
721 bundle operation. This occurs during `hg bundle` and on a server
721 bundle operation. This occurs during `hg bundle` and on a server
722 processing a `getbundle` wire protocol request (handles clones
722 processing a `getbundle` wire protocol request (handles clones
723 and pull requests).
723 and pull requests).
724
724
725 By default, all revisions are added to the changegroup.
725 By default, all revisions are added to the changegroup.
726 """
726 """
727 opts = _byteskwargs(opts)
727 opts = _byteskwargs(opts)
728 cl = repo.changelog
728 cl = repo.changelog
729 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
729 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
730 bundler = changegroup.getbundler(version, repo)
730 bundler = changegroup.getbundler(version, repo)
731
731
732 def d():
732 def d():
733 state, chunks = bundler._generatechangelog(cl, nodes)
733 state, chunks = bundler._generatechangelog(cl, nodes)
734 for chunk in chunks:
734 for chunk in chunks:
735 pass
735 pass
736
736
737 timer, fm = gettimer(ui, opts)
737 timer, fm = gettimer(ui, opts)
738
738
739 # Terminal printing can interfere with timing. So disable it.
739 # Terminal printing can interfere with timing. So disable it.
740 with ui.configoverride({(b'progress', b'disable'): True}):
740 with ui.configoverride({(b'progress', b'disable'): True}):
741 timer(d)
741 timer(d)
742
742
743 fm.end()
743 fm.end()
744
744
745 @command(b'perfdirs', formatteropts)
745 @command(b'perfdirs', formatteropts)
746 def perfdirs(ui, repo, **opts):
746 def perfdirs(ui, repo, **opts):
747 opts = _byteskwargs(opts)
747 opts = _byteskwargs(opts)
748 timer, fm = gettimer(ui, opts)
748 timer, fm = gettimer(ui, opts)
749 dirstate = repo.dirstate
749 dirstate = repo.dirstate
750 b'a' in dirstate
750 b'a' in dirstate
751 def d():
751 def d():
752 dirstate.hasdir(b'a')
752 dirstate.hasdir(b'a')
753 del dirstate._map._dirs
753 del dirstate._map._dirs
754 timer(d)
754 timer(d)
755 fm.end()
755 fm.end()
756
756
757 @command(b'perfdirstate', formatteropts)
757 @command(b'perfdirstate', formatteropts)
758 def perfdirstate(ui, repo, **opts):
758 def perfdirstate(ui, repo, **opts):
759 opts = _byteskwargs(opts)
759 opts = _byteskwargs(opts)
760 timer, fm = gettimer(ui, opts)
760 timer, fm = gettimer(ui, opts)
761 b"a" in repo.dirstate
761 b"a" in repo.dirstate
762 def d():
762 def d():
763 repo.dirstate.invalidate()
763 repo.dirstate.invalidate()
764 b"a" in repo.dirstate
764 b"a" in repo.dirstate
765 timer(d)
765 timer(d)
766 fm.end()
766 fm.end()
767
767
768 @command(b'perfdirstatedirs', formatteropts)
768 @command(b'perfdirstatedirs', formatteropts)
769 def perfdirstatedirs(ui, repo, **opts):
769 def perfdirstatedirs(ui, repo, **opts):
770 opts = _byteskwargs(opts)
770 opts = _byteskwargs(opts)
771 timer, fm = gettimer(ui, opts)
771 timer, fm = gettimer(ui, opts)
772 b"a" in repo.dirstate
772 b"a" in repo.dirstate
773 def d():
773 def d():
774 repo.dirstate.hasdir(b"a")
774 repo.dirstate.hasdir(b"a")
775 del repo.dirstate._map._dirs
775 del repo.dirstate._map._dirs
776 timer(d)
776 timer(d)
777 fm.end()
777 fm.end()
778
778
779 @command(b'perfdirstatefoldmap', formatteropts)
779 @command(b'perfdirstatefoldmap', formatteropts)
780 def perfdirstatefoldmap(ui, repo, **opts):
780 def perfdirstatefoldmap(ui, repo, **opts):
781 opts = _byteskwargs(opts)
781 opts = _byteskwargs(opts)
782 timer, fm = gettimer(ui, opts)
782 timer, fm = gettimer(ui, opts)
783 dirstate = repo.dirstate
783 dirstate = repo.dirstate
784 b'a' in dirstate
784 b'a' in dirstate
785 def d():
785 def d():
786 dirstate._map.filefoldmap.get(b'a')
786 dirstate._map.filefoldmap.get(b'a')
787 del dirstate._map.filefoldmap
787 del dirstate._map.filefoldmap
788 timer(d)
788 timer(d)
789 fm.end()
789 fm.end()
790
790
791 @command(b'perfdirfoldmap', formatteropts)
791 @command(b'perfdirfoldmap', formatteropts)
792 def perfdirfoldmap(ui, repo, **opts):
792 def perfdirfoldmap(ui, repo, **opts):
793 opts = _byteskwargs(opts)
793 opts = _byteskwargs(opts)
794 timer, fm = gettimer(ui, opts)
794 timer, fm = gettimer(ui, opts)
795 dirstate = repo.dirstate
795 dirstate = repo.dirstate
796 b'a' in dirstate
796 b'a' in dirstate
797 def d():
797 def d():
798 dirstate._map.dirfoldmap.get(b'a')
798 dirstate._map.dirfoldmap.get(b'a')
799 del dirstate._map.dirfoldmap
799 del dirstate._map.dirfoldmap
800 del dirstate._map._dirs
800 del dirstate._map._dirs
801 timer(d)
801 timer(d)
802 fm.end()
802 fm.end()
803
803
804 @command(b'perfdirstatewrite', formatteropts)
804 @command(b'perfdirstatewrite', formatteropts)
805 def perfdirstatewrite(ui, repo, **opts):
805 def perfdirstatewrite(ui, repo, **opts):
806 opts = _byteskwargs(opts)
806 opts = _byteskwargs(opts)
807 timer, fm = gettimer(ui, opts)
807 timer, fm = gettimer(ui, opts)
808 ds = repo.dirstate
808 ds = repo.dirstate
809 b"a" in ds
809 b"a" in ds
810 def d():
810 def d():
811 ds._dirty = True
811 ds._dirty = True
812 ds.write(repo.currenttransaction())
812 ds.write(repo.currenttransaction())
813 timer(d)
813 timer(d)
814 fm.end()
814 fm.end()
815
815
816 @command(b'perfmergecalculate',
816 @command(b'perfmergecalculate',
817 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
817 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
818 def perfmergecalculate(ui, repo, rev, **opts):
818 def perfmergecalculate(ui, repo, rev, **opts):
819 opts = _byteskwargs(opts)
819 opts = _byteskwargs(opts)
820 timer, fm = gettimer(ui, opts)
820 timer, fm = gettimer(ui, opts)
821 wctx = repo[None]
821 wctx = repo[None]
822 rctx = scmutil.revsingle(repo, rev, rev)
822 rctx = scmutil.revsingle(repo, rev, rev)
823 ancestor = wctx.ancestor(rctx)
823 ancestor = wctx.ancestor(rctx)
824 # we don't want working dir files to be stat'd in the benchmark, so prime
824 # we don't want working dir files to be stat'd in the benchmark, so prime
825 # that cache
825 # that cache
826 wctx.dirty()
826 wctx.dirty()
827 def d():
827 def d():
828 # acceptremote is True because we don't want prompts in the middle of
828 # acceptremote is True because we don't want prompts in the middle of
829 # our benchmark
829 # our benchmark
830 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
830 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
831 acceptremote=True, followcopies=True)
831 acceptremote=True, followcopies=True)
832 timer(d)
832 timer(d)
833 fm.end()
833 fm.end()
834
834
835 @command(b'perfpathcopies', [], b"REV REV")
835 @command(b'perfpathcopies', [], b"REV REV")
836 def perfpathcopies(ui, repo, rev1, rev2, **opts):
836 def perfpathcopies(ui, repo, rev1, rev2, **opts):
837 opts = _byteskwargs(opts)
837 opts = _byteskwargs(opts)
838 timer, fm = gettimer(ui, opts)
838 timer, fm = gettimer(ui, opts)
839 ctx1 = scmutil.revsingle(repo, rev1, rev1)
839 ctx1 = scmutil.revsingle(repo, rev1, rev1)
840 ctx2 = scmutil.revsingle(repo, rev2, rev2)
840 ctx2 = scmutil.revsingle(repo, rev2, rev2)
841 def d():
841 def d():
842 copies.pathcopies(ctx1, ctx2)
842 copies.pathcopies(ctx1, ctx2)
843 timer(d)
843 timer(d)
844 fm.end()
844 fm.end()
845
845
846 @command(b'perfphases',
846 @command(b'perfphases',
847 [(b'', b'full', False, b'include file reading time too'),
847 [(b'', b'full', False, b'include file reading time too'),
848 ], b"")
848 ], b"")
849 def perfphases(ui, repo, **opts):
849 def perfphases(ui, repo, **opts):
850 """benchmark phasesets computation"""
850 """benchmark phasesets computation"""
851 opts = _byteskwargs(opts)
851 opts = _byteskwargs(opts)
852 timer, fm = gettimer(ui, opts)
852 timer, fm = gettimer(ui, opts)
853 _phases = repo._phasecache
853 _phases = repo._phasecache
854 full = opts.get(b'full')
854 full = opts.get(b'full')
855 def d():
855 def d():
856 phases = _phases
856 phases = _phases
857 if full:
857 if full:
858 clearfilecache(repo, b'_phasecache')
858 clearfilecache(repo, b'_phasecache')
859 phases = repo._phasecache
859 phases = repo._phasecache
860 phases.invalidate()
860 phases.invalidate()
861 phases.loadphaserevs(repo)
861 phases.loadphaserevs(repo)
862 timer(d)
862 timer(d)
863 fm.end()
863 fm.end()
864
864
865 @command(b'perfphasesremote',
865 @command(b'perfphasesremote',
866 [], b"[DEST]")
866 [], b"[DEST]")
867 def perfphasesremote(ui, repo, dest=None, **opts):
867 def perfphasesremote(ui, repo, dest=None, **opts):
868 """benchmark time needed to analyse phases of the remote server"""
868 """benchmark time needed to analyse phases of the remote server"""
869 from mercurial.node import (
869 from mercurial.node import (
870 bin,
870 bin,
871 )
871 )
872 from mercurial import (
872 from mercurial import (
873 exchange,
873 exchange,
874 hg,
874 hg,
875 phases,
875 phases,
876 )
876 )
877 opts = _byteskwargs(opts)
877 opts = _byteskwargs(opts)
878 timer, fm = gettimer(ui, opts)
878 timer, fm = gettimer(ui, opts)
879
879
880 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
880 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
881 if not path:
881 if not path:
882 raise error.Abort((b'default repository not configured!'),
882 raise error.Abort((b'default repository not configured!'),
883 hint=(b"see 'hg help config.paths'"))
883 hint=(b"see 'hg help config.paths'"))
884 dest = path.pushloc or path.loc
884 dest = path.pushloc or path.loc
885 branches = (path.branch, opts.get(b'branch') or [])
885 branches = (path.branch, opts.get(b'branch') or [])
886 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
886 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
887 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
887 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
888 other = hg.peer(repo, opts, dest)
888 other = hg.peer(repo, opts, dest)
889
889
890 # easier to perform discovery through the operation
890 # easier to perform discovery through the operation
891 op = exchange.pushoperation(repo, other)
891 op = exchange.pushoperation(repo, other)
892 exchange._pushdiscoverychangeset(op)
892 exchange._pushdiscoverychangeset(op)
893
893
894 remotesubset = op.fallbackheads
894 remotesubset = op.fallbackheads
895
895
896 with other.commandexecutor() as e:
896 with other.commandexecutor() as e:
897 remotephases = e.callcommand(b'listkeys',
897 remotephases = e.callcommand(b'listkeys',
898 {b'namespace': b'phases'}).result()
898 {b'namespace': b'phases'}).result()
899 del other
899 del other
900 publishing = remotephases.get(b'publishing', False)
900 publishing = remotephases.get(b'publishing', False)
901 if publishing:
901 if publishing:
902 ui.status((b'publishing: yes\n'))
902 ui.status((b'publishing: yes\n'))
903 else:
903 else:
904 ui.status((b'publishing: no\n'))
904 ui.status((b'publishing: no\n'))
905
905
906 nodemap = repo.changelog.nodemap
906 nodemap = repo.changelog.nodemap
907 nonpublishroots = 0
907 nonpublishroots = 0
908 for nhex, phase in remotephases.iteritems():
908 for nhex, phase in remotephases.iteritems():
909 if nhex == b'publishing': # ignore data related to publish option
909 if nhex == b'publishing': # ignore data related to publish option
910 continue
910 continue
911 node = bin(nhex)
911 node = bin(nhex)
912 if node in nodemap and int(phase):
912 if node in nodemap and int(phase):
913 nonpublishroots += 1
913 nonpublishroots += 1
914 ui.status((b'number of roots: %d\n') % len(remotephases))
914 ui.status((b'number of roots: %d\n') % len(remotephases))
915 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
915 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
916 def d():
916 def d():
917 phases.remotephasessummary(repo,
917 phases.remotephasessummary(repo,
918 remotesubset,
918 remotesubset,
919 remotephases)
919 remotephases)
920 timer(d)
920 timer(d)
921 fm.end()
921 fm.end()
922
922
923 @command(b'perfmanifest',[
923 @command(b'perfmanifest',[
924 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
924 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
925 (b'', b'clear-disk', False, b'clear on-disk caches too'),
925 (b'', b'clear-disk', False, b'clear on-disk caches too'),
926 ] + formatteropts, b'REV|NODE')
926 ] + formatteropts, b'REV|NODE')
927 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
927 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
928 """benchmark the time to read a manifest from disk and return a usable
928 """benchmark the time to read a manifest from disk and return a usable
929 dict-like object
929 dict-like object
930
930
931 Manifest caches are cleared before retrieval."""
931 Manifest caches are cleared before retrieval."""
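# Example invocations (illustrative; revisions are placeholders):
#
#   $ hg perfmanifest tip
#   $ hg perfmanifest --clear-disk 0
#   $ hg perfmanifest -m 0
#
# The last form looks the argument up as a manifest revision rather than a
# changeset revision.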
932 opts = _byteskwargs(opts)
932 opts = _byteskwargs(opts)
933 timer, fm = gettimer(ui, opts)
933 timer, fm = gettimer(ui, opts)
934 if not manifest_rev:
934 if not manifest_rev:
935 ctx = scmutil.revsingle(repo, rev, rev)
935 ctx = scmutil.revsingle(repo, rev, rev)
936 t = ctx.manifestnode()
936 t = ctx.manifestnode()
937 else:
937 else:
938 from mercurial.node import bin
938 from mercurial.node import bin
939
939
940 if len(rev) == 40:
940 if len(rev) == 40:
941 t = bin(rev)
941 t = bin(rev)
942 else:
942 else:
943 try:
943 try:
944 rev = int(rev)
944 rev = int(rev)
945
945
946 if util.safehasattr(repo.manifestlog, b'getstorage'):
946 if util.safehasattr(repo.manifestlog, b'getstorage'):
947 t = repo.manifestlog.getstorage(b'').node(rev)
947 t = repo.manifestlog.getstorage(b'').node(rev)
948 else:
948 else:
949 t = repo.manifestlog._revlog.lookup(rev)
949 t = repo.manifestlog._revlog.lookup(rev)
950 except ValueError:
950 except ValueError:
951 raise error.Abort(b'manifest revision must be integer or full '
951 raise error.Abort(b'manifest revision must be integer or full '
952 b'node')
952 b'node')
953 def d():
953 def d():
954 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
954 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
955 repo.manifestlog[t].read()
955 repo.manifestlog[t].read()
956 timer(d)
956 timer(d)
957 fm.end()
957 fm.end()
958
958
959 @command(b'perfchangeset', formatteropts)
959 @command(b'perfchangeset', formatteropts)
960 def perfchangeset(ui, repo, rev, **opts):
960 def perfchangeset(ui, repo, rev, **opts):
961 opts = _byteskwargs(opts)
961 opts = _byteskwargs(opts)
962 timer, fm = gettimer(ui, opts)
962 timer, fm = gettimer(ui, opts)
963 n = scmutil.revsingle(repo, rev).node()
963 n = scmutil.revsingle(repo, rev).node()
964 def d():
964 def d():
965 repo.changelog.read(n)
965 repo.changelog.read(n)
966 #repo.changelog._cache = None
966 #repo.changelog._cache = None
967 timer(d)
967 timer(d)
968 fm.end()
968 fm.end()
969
969
970 @command(b'perfindex', formatteropts)
970 @command(b'perfindex', formatteropts)
971 def perfindex(ui, repo, **opts):
971 def perfindex(ui, repo, **opts):
972 import mercurial.revlog
972 import mercurial.revlog
973 opts = _byteskwargs(opts)
973 opts = _byteskwargs(opts)
974 timer, fm = gettimer(ui, opts)
974 timer, fm = gettimer(ui, opts)
975 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
975 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
976 n = repo[b"tip"].node()
976 n = repo[b"tip"].node()
977 svfs = getsvfs(repo)
977 svfs = getsvfs(repo)
978 def d():
978 def d():
979 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
979 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
980 cl.rev(n)
980 cl.rev(n)
981 timer(d)
981 timer(d)
982 fm.end()
982 fm.end()
983
983
984 @command(b'perfstartup', formatteropts)
984 @command(b'perfstartup', formatteropts)
985 def perfstartup(ui, repo, **opts):
985 def perfstartup(ui, repo, **opts):
986 opts = _byteskwargs(opts)
986 opts = _byteskwargs(opts)
987 timer, fm = gettimer(ui, opts)
987 timer, fm = gettimer(ui, opts)
988 def d():
988 def d():
989 if os.name != r'nt':
989 if os.name != r'nt':
990 os.system(b"HGRCPATH= %s version -q > /dev/null" %
990 os.system(b"HGRCPATH= %s version -q > /dev/null" %
991 fsencode(sys.argv[0]))
991 fsencode(sys.argv[0]))
992 else:
992 else:
993 os.environ[r'HGRCPATH'] = r' '
993 os.environ[r'HGRCPATH'] = r' '
994 os.system(r"%s version -q > NUL" % sys.argv[0])
994 os.system(r"%s version -q > NUL" % sys.argv[0])
995 timer(d)
995 timer(d)
996 fm.end()
996 fm.end()
997
997
998 @command(b'perfparents', formatteropts)
998 @command(b'perfparents', formatteropts)
999 def perfparents(ui, repo, **opts):
999 def perfparents(ui, repo, **opts):
1000 opts = _byteskwargs(opts)
1000 opts = _byteskwargs(opts)
1001 timer, fm = gettimer(ui, opts)
1001 timer, fm = gettimer(ui, opts)
1002 # control the number of commits perfparents iterates over
1002 # control the number of commits perfparents iterates over
1003 # experimental config: perf.parentscount
1003 # experimental config: perf.parentscount
1004 count = getint(ui, b"perf", b"parentscount", 1000)
1004 count = getint(ui, b"perf", b"parentscount", 1000)
1005 if len(repo.changelog) < count:
1005 if len(repo.changelog) < count:
1006 raise error.Abort(b"repo needs %d commits for this test" % count)
1006 raise error.Abort(b"repo needs %d commits for this test" % count)
1007 repo = repo.unfiltered()
1007 repo = repo.unfiltered()
1008 nl = [repo.changelog.node(i) for i in _xrange(count)]
1008 nl = [repo.changelog.node(i) for i in _xrange(count)]
1009 def d():
1009 def d():
1010 for n in nl:
1010 for n in nl:
1011 repo.changelog.parents(n)
1011 repo.changelog.parents(n)
1012 timer(d)
1012 timer(d)
1013 fm.end()
1013 fm.end()
1014
1014
1015 @command(b'perfctxfiles', formatteropts)
1015 @command(b'perfctxfiles', formatteropts)
1016 def perfctxfiles(ui, repo, x, **opts):
1016 def perfctxfiles(ui, repo, x, **opts):
1017 opts = _byteskwargs(opts)
1017 opts = _byteskwargs(opts)
1018 x = int(x)
1018 x = int(x)
1019 timer, fm = gettimer(ui, opts)
1019 timer, fm = gettimer(ui, opts)
1020 def d():
1020 def d():
1021 len(repo[x].files())
1021 len(repo[x].files())
1022 timer(d)
1022 timer(d)
1023 fm.end()
1023 fm.end()
1024
1024
1025 @command(b'perfrawfiles', formatteropts)
1025 @command(b'perfrawfiles', formatteropts)
1026 def perfrawfiles(ui, repo, x, **opts):
1026 def perfrawfiles(ui, repo, x, **opts):
1027 opts = _byteskwargs(opts)
1027 opts = _byteskwargs(opts)
1028 x = int(x)
1028 x = int(x)
1029 timer, fm = gettimer(ui, opts)
1029 timer, fm = gettimer(ui, opts)
1030 cl = repo.changelog
1030 cl = repo.changelog
1031 def d():
1031 def d():
1032 len(cl.read(x)[3])
1032 len(cl.read(x)[3])
1033 timer(d)
1033 timer(d)
1034 fm.end()
1034 fm.end()
1035
1035
1036 @command(b'perflookup', formatteropts)
1036 @command(b'perflookup', formatteropts)
1037 def perflookup(ui, repo, rev, **opts):
1037 def perflookup(ui, repo, rev, **opts):
1038 opts = _byteskwargs(opts)
1038 opts = _byteskwargs(opts)
1039 timer, fm = gettimer(ui, opts)
1039 timer, fm = gettimer(ui, opts)
1040 timer(lambda: len(repo.lookup(rev)))
1040 timer(lambda: len(repo.lookup(rev)))
1041 fm.end()
1041 fm.end()
1042
1042
1043 @command(b'perflinelogedits',
1043 @command(b'perflinelogedits',
1044 [(b'n', b'edits', 10000, b'number of edits'),
1044 [(b'n', b'edits', 10000, b'number of edits'),
1045 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1045 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1046 ], norepo=True)
1046 ], norepo=True)
1047 def perflinelogedits(ui, **opts):
1047 def perflinelogedits(ui, **opts):
1048 from mercurial import linelog
1048 from mercurial import linelog
1049
1049
1050 opts = _byteskwargs(opts)
1050 opts = _byteskwargs(opts)
1051
1051
1052 edits = opts[b'edits']
1052 edits = opts[b'edits']
1053 maxhunklines = opts[b'max_hunk_lines']
1053 maxhunklines = opts[b'max_hunk_lines']
1054
1054
1055 maxb1 = 100000
1055 maxb1 = 100000
1056 random.seed(0)
1056 random.seed(0)
1057 randint = random.randint
1057 randint = random.randint
1058 currentlines = 0
1058 currentlines = 0
1059 arglist = []
1059 arglist = []
1060 for rev in _xrange(edits):
1060 for rev in _xrange(edits):
1061 a1 = randint(0, currentlines)
1061 a1 = randint(0, currentlines)
1062 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1062 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1063 b1 = randint(0, maxb1)
1063 b1 = randint(0, maxb1)
1064 b2 = randint(b1, b1 + maxhunklines)
1064 b2 = randint(b1, b1 + maxhunklines)
1065 currentlines += (b2 - b1) - (a2 - a1)
1065 currentlines += (b2 - b1) - (a2 - a1)
1066 arglist.append((rev, a1, a2, b1, b2))
1066 arglist.append((rev, a1, a2, b1, b2))
1067
1067
1068 def d():
1068 def d():
1069 ll = linelog.linelog()
1069 ll = linelog.linelog()
1070 for args in arglist:
1070 for args in arglist:
1071 ll.replacelines(*args)
1071 ll.replacelines(*args)
1072
1072
1073 timer, fm = gettimer(ui, opts)
1073 timer, fm = gettimer(ui, opts)
1074 timer(d)
1074 timer(d)
1075 fm.end()
1075 fm.end()
1076
1076
1077 @command(b'perfrevrange', formatteropts)
1077 @command(b'perfrevrange', formatteropts)
1078 def perfrevrange(ui, repo, *specs, **opts):
1078 def perfrevrange(ui, repo, *specs, **opts):
1079 opts = _byteskwargs(opts)
1079 opts = _byteskwargs(opts)
1080 timer, fm = gettimer(ui, opts)
1080 timer, fm = gettimer(ui, opts)
1081 revrange = scmutil.revrange
1081 revrange = scmutil.revrange
1082 timer(lambda: len(revrange(repo, specs)))
1082 timer(lambda: len(revrange(repo, specs)))
1083 fm.end()
1083 fm.end()
1084
1084
1085 @command(b'perfnodelookup', formatteropts)
1085 @command(b'perfnodelookup', formatteropts)
1086 def perfnodelookup(ui, repo, rev, **opts):
1086 def perfnodelookup(ui, repo, rev, **opts):
1087 opts = _byteskwargs(opts)
1087 opts = _byteskwargs(opts)
1088 timer, fm = gettimer(ui, opts)
1088 timer, fm = gettimer(ui, opts)
1089 import mercurial.revlog
1089 import mercurial.revlog
1090 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1090 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1091 n = scmutil.revsingle(repo, rev).node()
1091 n = scmutil.revsingle(repo, rev).node()
1092 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1092 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1093 def d():
1093 def d():
1094 cl.rev(n)
1094 cl.rev(n)
1095 clearcaches(cl)
1095 clearcaches(cl)
1096 timer(d)
1096 timer(d)
1097 fm.end()
1097 fm.end()
1098
1098
1099 @command(b'perflog',
1099 @command(b'perflog',
1100 [(b'', b'rename', False, b'ask log to follow renames')
1100 [(b'', b'rename', False, b'ask log to follow renames')
1101 ] + formatteropts)
1101 ] + formatteropts)
1102 def perflog(ui, repo, rev=None, **opts):
1102 def perflog(ui, repo, rev=None, **opts):
1103 opts = _byteskwargs(opts)
1103 opts = _byteskwargs(opts)
1104 if rev is None:
1104 if rev is None:
1105 rev=[]
1105 rev=[]
1106 timer, fm = gettimer(ui, opts)
1106 timer, fm = gettimer(ui, opts)
1107 ui.pushbuffer()
1107 ui.pushbuffer()
1108 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1108 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1109 copies=opts.get(b'rename')))
1109 copies=opts.get(b'rename')))
1110 ui.popbuffer()
1110 ui.popbuffer()
1111 fm.end()
1111 fm.end()
1112
1112
1113 @command(b'perfmoonwalk', formatteropts)
1113 @command(b'perfmoonwalk', formatteropts)
1114 def perfmoonwalk(ui, repo, **opts):
1114 def perfmoonwalk(ui, repo, **opts):
1115 """benchmark walking the changelog backwards
1115 """benchmark walking the changelog backwards
1116
1116
1117 This also loads the changelog data for each revision in the changelog.
1117 This also loads the changelog data for each revision in the changelog.
1118 """
1118 """
1119 opts = _byteskwargs(opts)
1119 opts = _byteskwargs(opts)
1120 timer, fm = gettimer(ui, opts)
1120 timer, fm = gettimer(ui, opts)
1121 def moonwalk():
1121 def moonwalk():
1122 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1122 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1123 ctx = repo[i]
1123 ctx = repo[i]
1124 ctx.branch() # read changelog data (in addition to the index)
1124 ctx.branch() # read changelog data (in addition to the index)
1125 timer(moonwalk)
1125 timer(moonwalk)
1126 fm.end()
1126 fm.end()
1127
1127
1128 @command(b'perftemplating',
1128 @command(b'perftemplating',
1129 [(b'r', b'rev', [], b'revisions to run the template on'),
1129 [(b'r', b'rev', [], b'revisions to run the template on'),
1130 ] + formatteropts)
1130 ] + formatteropts)
1131 def perftemplating(ui, repo, testedtemplate=None, **opts):
1131 def perftemplating(ui, repo, testedtemplate=None, **opts):
1132 """test the rendering time of a given template"""
1132 """test the rendering time of a given template"""
1133 if makelogtemplater is None:
1133 if makelogtemplater is None:
1134 raise error.Abort((b"perftemplating not available with this Mercurial"),
1134 raise error.Abort((b"perftemplating not available with this Mercurial"),
1135 hint=b"use 4.3 or later")
1135 hint=b"use 4.3 or later")
1136
1136
1137 opts = _byteskwargs(opts)
1137 opts = _byteskwargs(opts)
1138
1138
1139 nullui = ui.copy()
1139 nullui = ui.copy()
1140 nullui.fout = open(os.devnull, r'wb')
1140 nullui.fout = open(os.devnull, r'wb')
1141 nullui.disablepager()
1141 nullui.disablepager()
1142 revs = opts.get(b'rev')
1142 revs = opts.get(b'rev')
1143 if not revs:
1143 if not revs:
1144 revs = [b'all()']
1144 revs = [b'all()']
1145 revs = list(scmutil.revrange(repo, revs))
1145 revs = list(scmutil.revrange(repo, revs))
1146
1146
1147 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1147 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1148 b' {author|person}: {desc|firstline}\n')
1148 b' {author|person}: {desc|firstline}\n')
1149 if testedtemplate is None:
1149 if testedtemplate is None:
1150 testedtemplate = defaulttemplate
1150 testedtemplate = defaulttemplate
1151 displayer = makelogtemplater(nullui, repo, testedtemplate)
1151 displayer = makelogtemplater(nullui, repo, testedtemplate)
1152 def format():
1152 def format():
1153 for r in revs:
1153 for r in revs:
1154 ctx = repo[r]
1154 ctx = repo[r]
1155 displayer.show(ctx)
1155 displayer.show(ctx)
1156 displayer.flush(ctx)
1156 displayer.flush(ctx)
1157
1157
1158 timer, fm = gettimer(ui, opts)
1158 timer, fm = gettimer(ui, opts)
1159 timer(format)
1159 timer(format)
1160 fm.end()
1160 fm.end()
1161
1161
1162 @command(b'perfhelper-tracecopies', formatteropts +
1162 @command(b'perfhelper-tracecopies', formatteropts +
1163 [
1163 [
1164 (b'r', b'revs', [], b'restrict search to these revisions'),
1164 (b'r', b'revs', [], b'restrict search to these revisions'),
1165 ])
1165 ])
1166 def perfhelpertracecopies(ui, repo, revs=[], **opts):
1166 def perfhelpertracecopies(ui, repo, revs=[], **opts):
1167 """find statistic about potential parameters for the `perftracecopies`
1167 """find statistic about potential parameters for the `perftracecopies`
1168
1168
1169 This command find source-destination pair relevant for copytracing testing.
1169 This command find source-destination pair relevant for copytracing testing.
1170 It report value for some of the parameters that impact copy tracing time.
1170 It report value for some of the parameters that impact copy tracing time.
1171 """
1171 """
1172 opts = _byteskwargs(opts)
1172 opts = _byteskwargs(opts)
1173 fm = ui.formatter(b'perf', opts)
1173 fm = ui.formatter(b'perf', opts)
1174 header = '%12s %12s %12s %12s\n'
1174 header = '%12s %12s %12s %12s\n'
1175 output = ("%(source)12s %(destination)12s "
1175 output = ("%(source)12s %(destination)12s "
1176 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1176 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1177 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1177 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1178
1178
1179 if not revs:
1179 if not revs:
1180 revs = ['all()']
1180 revs = ['all()']
1181 revs = scmutil.revrange(repo, revs)
1181 revs = scmutil.revrange(repo, revs)
1182
1182
1183 roi = repo.revs('merge() and %ld', revs)
1183 roi = repo.revs('merge() and %ld', revs)
1184 for r in roi:
1184 for r in roi:
1185 ctx = repo[r]
1185 ctx = repo[r]
1186 p1 = ctx.p1().rev()
1186 p1 = ctx.p1().rev()
1187 p2 = ctx.p2().rev()
1187 p2 = ctx.p2().rev()
1188 bases = repo.changelog._commonancestorsheads(p1, p2)
1188 bases = repo.changelog._commonancestorsheads(p1, p2)
1189 for p in (p1, p2):
1189 for p in (p1, p2):
1190 for b in bases:
1190 for b in bases:
1191 base = repo[b]
1191 base = repo[b]
1192 parent = repo[p]
1192 parent = repo[p]
1193 missing = copies._computeforwardmissing(base, parent)
1193 missing = copies._computeforwardmissing(base, parent)
1194 if not missing:
1194 if not missing:
1195 continue
1195 continue
1196 fm.startitem()
1196 fm.startitem()
1197 data = {
1197 data = {
1198 b'source': base.hex(),
1198 b'source': base.hex(),
1199 b'destination': parent.hex(),
1199 b'destination': parent.hex(),
1200 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1200 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1201 b'nbmissingfiles': len(missing),
1201 b'nbmissingfiles': len(missing),
1202 }
1202 }
1203 fm.data(**data)
1203 fm.data(**data)
1204 out = data.copy()
1204 out = data.copy()
1205 out['source'] = fm.hexfunc(base.node())
1205 out['source'] = fm.hexfunc(base.node())
1206 out['destination'] = fm.hexfunc(parent.node())
1206 out['destination'] = fm.hexfunc(parent.node())
1207 fm.plain(output % out)
1207 fm.plain(output % out)
1208 fm.end()
1208 fm.end()
1209
1209
1210 @command(b'perfcca', formatteropts)
1210 @command(b'perfcca', formatteropts)
1211 def perfcca(ui, repo, **opts):
1211 def perfcca(ui, repo, **opts):
1212 opts = _byteskwargs(opts)
1212 opts = _byteskwargs(opts)
1213 timer, fm = gettimer(ui, opts)
1213 timer, fm = gettimer(ui, opts)
1214 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1214 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1215 fm.end()
1215 fm.end()
1216
1216
1217 @command(b'perffncacheload', formatteropts)
1217 @command(b'perffncacheload', formatteropts)
1218 def perffncacheload(ui, repo, **opts):
1218 def perffncacheload(ui, repo, **opts):
1219 opts = _byteskwargs(opts)
1219 opts = _byteskwargs(opts)
1220 timer, fm = gettimer(ui, opts)
1220 timer, fm = gettimer(ui, opts)
1221 s = repo.store
1221 s = repo.store
1222 def d():
1222 def d():
1223 s.fncache._load()
1223 s.fncache._load()
1224 timer(d)
1224 timer(d)
1225 fm.end()
1225 fm.end()
1226
1226
1227 @command(b'perffncachewrite', formatteropts)
1227 @command(b'perffncachewrite', formatteropts)
1228 def perffncachewrite(ui, repo, **opts):
1228 def perffncachewrite(ui, repo, **opts):
1229 opts = _byteskwargs(opts)
1229 opts = _byteskwargs(opts)
1230 timer, fm = gettimer(ui, opts)
1230 timer, fm = gettimer(ui, opts)
1231 s = repo.store
1231 s = repo.store
1232 lock = repo.lock()
1232 lock = repo.lock()
1233 s.fncache._load()
1233 s.fncache._load()
1234 tr = repo.transaction(b'perffncachewrite')
1234 tr = repo.transaction(b'perffncachewrite')
1235 tr.addbackup(b'fncache')
1235 tr.addbackup(b'fncache')
1236 def d():
1236 def d():
1237 s.fncache._dirty = True
1237 s.fncache._dirty = True
1238 s.fncache.write(tr)
1238 s.fncache.write(tr)
1239 timer(d)
1239 timer(d)
1240 tr.close()
1240 tr.close()
1241 lock.release()
1241 lock.release()
1242 fm.end()
1242 fm.end()
1243
1243
1244 @command(b'perffncacheencode', formatteropts)
1244 @command(b'perffncacheencode', formatteropts)
1245 def perffncacheencode(ui, repo, **opts):
1245 def perffncacheencode(ui, repo, **opts):
1246 opts = _byteskwargs(opts)
1246 opts = _byteskwargs(opts)
1247 timer, fm = gettimer(ui, opts)
1247 timer, fm = gettimer(ui, opts)
1248 s = repo.store
1248 s = repo.store
1249 s.fncache._load()
1249 s.fncache._load()
1250 def d():
1250 def d():
1251 for p in s.fncache.entries:
1251 for p in s.fncache.entries:
1252 s.encode(p)
1252 s.encode(p)
1253 timer(d)
1253 timer(d)
1254 fm.end()
1254 fm.end()
1255
1255
1256 def _bdiffworker(q, blocks, xdiff, ready, done):
1256 def _bdiffworker(q, blocks, xdiff, ready, done):
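# Worker protocol: each thread drains (text1, text2) pairs from the queue and
# diffs them until it pops a None sentinel, then blocks on the `ready`
# condition so the main thread can reuse the pool for the next timed run.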
1257 while not done.is_set():
1257 while not done.is_set():
1258 pair = q.get()
1258 pair = q.get()
1259 while pair is not None:
1259 while pair is not None:
1260 if xdiff:
1260 if xdiff:
1261 mdiff.bdiff.xdiffblocks(*pair)
1261 mdiff.bdiff.xdiffblocks(*pair)
1262 elif blocks:
1262 elif blocks:
1263 mdiff.bdiff.blocks(*pair)
1263 mdiff.bdiff.blocks(*pair)
1264 else:
1264 else:
1265 mdiff.textdiff(*pair)
1265 mdiff.textdiff(*pair)
1266 q.task_done()
1266 q.task_done()
1267 pair = q.get()
1267 pair = q.get()
1268 q.task_done() # for the None one
1268 q.task_done() # for the None one
1269 with ready:
1269 with ready:
1270 ready.wait()
1270 ready.wait()
1271
1271
1272 def _manifestrevision(repo, mnode):
1272 def _manifestrevision(repo, mnode):
1273 ml = repo.manifestlog
1273 ml = repo.manifestlog
1274
1274
1275 if util.safehasattr(ml, b'getstorage'):
1275 if util.safehasattr(ml, b'getstorage'):
1276 store = ml.getstorage(b'')
1276 store = ml.getstorage(b'')
1277 else:
1277 else:
1278 store = ml._revlog
1278 store = ml._revlog
1279
1279
1280 return store.revision(mnode)
1280 return store.revision(mnode)
1281
1281
1282 @command(b'perfbdiff', revlogopts + formatteropts + [
1282 @command(b'perfbdiff', revlogopts + formatteropts + [
1283 (b'', b'count', 1, b'number of revisions to test, starting at the given revision'),
1283 (b'', b'count', 1, b'number of revisions to test, starting at the given revision'),
1284 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1284 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1285 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1285 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1286 (b'', b'blocks', False, b'test computing diffs into blocks'),
1286 (b'', b'blocks', False, b'test computing diffs into blocks'),
1287 (b'', b'xdiff', False, b'use xdiff algorithm'),
1287 (b'', b'xdiff', False, b'use xdiff algorithm'),
1288 ],
1288 ],
1289
1289
1290 b'-c|-m|FILE REV')
1290 b'-c|-m|FILE REV')
1291 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1291 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1292 """benchmark a bdiff between revisions
1292 """benchmark a bdiff between revisions
1293
1293
1294 By default, benchmark a bdiff between the requested revision and its delta parent.
1294 By default, benchmark a bdiff between the requested revision and its delta parent.
1295
1295
1296 With ``--count``, benchmark bdiffs between delta parents and self for N
1296 With ``--count``, benchmark bdiffs between delta parents and self for N
1297 revisions starting at the specified revision.
1297 revisions starting at the specified revision.
1298
1298
1299 With ``--alldata``, assume the requested revision is a changeset and
1299 With ``--alldata``, assume the requested revision is a changeset and
1300 measure bdiffs for all changes related to that changeset (manifest
1300 measure bdiffs for all changes related to that changeset (manifest
1301 and filelogs).
1301 and filelogs).
1302 """
1302 """
1303 opts = _byteskwargs(opts)
1303 opts = _byteskwargs(opts)
1304
1304
1305 if opts[b'xdiff'] and not opts[b'blocks']:
1305 if opts[b'xdiff'] and not opts[b'blocks']:
1306 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1306 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1307
1307
1308 if opts[b'alldata']:
1308 if opts[b'alldata']:
1309 opts[b'changelog'] = True
1309 opts[b'changelog'] = True
1310
1310
1311 if opts.get(b'changelog') or opts.get(b'manifest'):
1311 if opts.get(b'changelog') or opts.get(b'manifest'):
1312 file_, rev = None, file_
1312 file_, rev = None, file_
1313 elif rev is None:
1313 elif rev is None:
1314 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1314 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1315
1315
1316 blocks = opts[b'blocks']
1316 blocks = opts[b'blocks']
1317 xdiff = opts[b'xdiff']
1317 xdiff = opts[b'xdiff']
1318 textpairs = []
1318 textpairs = []
1319
1319
1320 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1320 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1321
1321
1322 startrev = r.rev(r.lookup(rev))
1322 startrev = r.rev(r.lookup(rev))
1323 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1323 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1324 if opts[b'alldata']:
1324 if opts[b'alldata']:
1325 # Load revisions associated with changeset.
1325 # Load revisions associated with changeset.
1326 ctx = repo[rev]
1326 ctx = repo[rev]
1327 mtext = _manifestrevision(repo, ctx.manifestnode())
1327 mtext = _manifestrevision(repo, ctx.manifestnode())
1328 for pctx in ctx.parents():
1328 for pctx in ctx.parents():
1329 pman = _manifestrevision(repo, pctx.manifestnode())
1329 pman = _manifestrevision(repo, pctx.manifestnode())
1330 textpairs.append((pman, mtext))
1330 textpairs.append((pman, mtext))
1331
1331
1332 # Load filelog revisions by iterating manifest delta.
1332 # Load filelog revisions by iterating manifest delta.
1333 man = ctx.manifest()
1333 man = ctx.manifest()
1334 pman = ctx.p1().manifest()
1334 pman = ctx.p1().manifest()
1335 for filename, change in pman.diff(man).items():
1335 for filename, change in pman.diff(man).items():
1336 fctx = repo.file(filename)
1336 fctx = repo.file(filename)
1337 f1 = fctx.revision(change[0][0] or -1)
1337 f1 = fctx.revision(change[0][0] or -1)
1338 f2 = fctx.revision(change[1][0] or -1)
1338 f2 = fctx.revision(change[1][0] or -1)
1339 textpairs.append((f1, f2))
1339 textpairs.append((f1, f2))
1340 else:
1340 else:
1341 dp = r.deltaparent(rev)
1341 dp = r.deltaparent(rev)
1342 textpairs.append((r.revision(dp), r.revision(rev)))
1342 textpairs.append((r.revision(dp), r.revision(rev)))
1343
1343
1344 withthreads = threads > 0
1344 withthreads = threads > 0
1345 if not withthreads:
1345 if not withthreads:
1346 def d():
1346 def d():
1347 for pair in textpairs:
1347 for pair in textpairs:
1348 if xdiff:
1348 if xdiff:
1349 mdiff.bdiff.xdiffblocks(*pair)
1349 mdiff.bdiff.xdiffblocks(*pair)
1350 elif blocks:
1350 elif blocks:
1351 mdiff.bdiff.blocks(*pair)
1351 mdiff.bdiff.blocks(*pair)
1352 else:
1352 else:
1353 mdiff.textdiff(*pair)
1353 mdiff.textdiff(*pair)
1354 else:
1354 else:
1355 q = queue()
1355 q = queue()
1356 for i in _xrange(threads):
1356 for i in _xrange(threads):
1357 q.put(None)
1357 q.put(None)
1358 ready = threading.Condition()
1358 ready = threading.Condition()
1359 done = threading.Event()
1359 done = threading.Event()
1360 for i in _xrange(threads):
1360 for i in _xrange(threads):
1361 threading.Thread(target=_bdiffworker,
1361 threading.Thread(target=_bdiffworker,
1362 args=(q, blocks, xdiff, ready, done)).start()
1362 args=(q, blocks, xdiff, ready, done)).start()
1363 q.join()
1363 q.join()
1364 def d():
1364 def d():
1365 for pair in textpairs:
1365 for pair in textpairs:
1366 q.put(pair)
1366 q.put(pair)
1367 for i in _xrange(threads):
1367 for i in _xrange(threads):
1368 q.put(None)
1368 q.put(None)
1369 with ready:
1369 with ready:
1370 ready.notify_all()
1370 ready.notify_all()
1371 q.join()
1371 q.join()
1372 timer, fm = gettimer(ui, opts)
1372 timer, fm = gettimer(ui, opts)
1373 timer(d)
1373 timer(d)
1374 fm.end()
1374 fm.end()
1375
1375
1376 if withthreads:
1376 if withthreads:
1377 done.set()
1377 done.set()
1378 for i in _xrange(threads):
1378 for i in _xrange(threads):
1379 q.put(None)
1379 q.put(None)
1380 with ready:
1380 with ready:
1381 ready.notify_all()
1381 ready.notify_all()
1382
1382
1383 @command(b'perfunidiff', revlogopts + formatteropts + [
1383 @command(b'perfunidiff', revlogopts + formatteropts + [
1384 (b'', b'count', 1, b'number of revisions to test, starting at the given revision'),
1384 (b'', b'count', 1, b'number of revisions to test, starting at the given revision'),
1385 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1385 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1386 ], b'-c|-m|FILE REV')
1386 ], b'-c|-m|FILE REV')
1387 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1387 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1388 """benchmark a unified diff between revisions
1388 """benchmark a unified diff between revisions
1389
1389
1390 This doesn't include any copy tracing - it's just a unified diff
1390 This doesn't include any copy tracing - it's just a unified diff
1391 of the texts.
1391 of the texts.
1392
1392
1393 By default, benchmark a diff between the requested revision and its delta parent.
1393 By default, benchmark a diff between the requested revision and its delta parent.
1394
1394
1395 With ``--count``, benchmark diffs between delta parents and self for N
1395 With ``--count``, benchmark diffs between delta parents and self for N
1396 revisions starting at the specified revision.
1396 revisions starting at the specified revision.
1397
1397
1398 With ``--alldata``, assume the requested revision is a changeset and
1398 With ``--alldata``, assume the requested revision is a changeset and
1399 measure diffs for all changes related to that changeset (manifest
1399 measure diffs for all changes related to that changeset (manifest
1400 and filelogs).
1400 and filelogs).
1401 """
1401 """
1402 opts = _byteskwargs(opts)
1402 opts = _byteskwargs(opts)
1403 if opts[b'alldata']:
1403 if opts[b'alldata']:
1404 opts[b'changelog'] = True
1404 opts[b'changelog'] = True
1405
1405
1406 if opts.get(b'changelog') or opts.get(b'manifest'):
1406 if opts.get(b'changelog') or opts.get(b'manifest'):
1407 file_, rev = None, file_
1407 file_, rev = None, file_
1408 elif rev is None:
1408 elif rev is None:
1409 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1409 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1410
1410
1411 textpairs = []
1411 textpairs = []
1412
1412
1413 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1413 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1414
1414
1415 startrev = r.rev(r.lookup(rev))
1415 startrev = r.rev(r.lookup(rev))
1416 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1416 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1417 if opts[b'alldata']:
1417 if opts[b'alldata']:
1418 # Load revisions associated with changeset.
1418 # Load revisions associated with changeset.
1419 ctx = repo[rev]
1419 ctx = repo[rev]
1420 mtext = _manifestrevision(repo, ctx.manifestnode())
1420 mtext = _manifestrevision(repo, ctx.manifestnode())
1421 for pctx in ctx.parents():
1421 for pctx in ctx.parents():
1422 pman = _manifestrevision(repo, pctx.manifestnode())
1422 pman = _manifestrevision(repo, pctx.manifestnode())
1423 textpairs.append((pman, mtext))
1423 textpairs.append((pman, mtext))
1424
1424
1425 # Load filelog revisions by iterating manifest delta.
1425 # Load filelog revisions by iterating manifest delta.
1426 man = ctx.manifest()
1426 man = ctx.manifest()
1427 pman = ctx.p1().manifest()
1427 pman = ctx.p1().manifest()
1428 for filename, change in pman.diff(man).items():
1428 for filename, change in pman.diff(man).items():
1429 fctx = repo.file(filename)
1429 fctx = repo.file(filename)
1430 f1 = fctx.revision(change[0][0] or -1)
1430 f1 = fctx.revision(change[0][0] or -1)
1431 f2 = fctx.revision(change[1][0] or -1)
1431 f2 = fctx.revision(change[1][0] or -1)
1432 textpairs.append((f1, f2))
1432 textpairs.append((f1, f2))
1433 else:
1433 else:
1434 dp = r.deltaparent(rev)
1434 dp = r.deltaparent(rev)
1435 textpairs.append((r.revision(dp), r.revision(rev)))
1435 textpairs.append((r.revision(dp), r.revision(rev)))
1436
1436
1437 def d():
1437 def d():
1438 for left, right in textpairs:
1438 for left, right in textpairs:
1439 # The date strings don't matter, so we pass empty strings.
1439 # The date strings don't matter, so we pass empty strings.
1440 headerlines, hunks = mdiff.unidiff(
1440 headerlines, hunks = mdiff.unidiff(
1441 left, b'', right, b'', b'left', b'right', binary=False)
1441 left, b'', right, b'', b'left', b'right', binary=False)
1442 # consume iterators in roughly the way patch.py does
1442 # consume iterators in roughly the way patch.py does
1443 b'\n'.join(headerlines)
1443 b'\n'.join(headerlines)
1444 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1444 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1445 timer, fm = gettimer(ui, opts)
1445 timer, fm = gettimer(ui, opts)
1446 timer(d)
1446 timer(d)
1447 fm.end()
1447 fm.end()
1448
1448
1449 @command(b'perfdiffwd', formatteropts)
1449 @command(b'perfdiffwd', formatteropts)
1450 def perfdiffwd(ui, repo, **opts):
1450 def perfdiffwd(ui, repo, **opts):
1451 """Profile diff of working directory changes"""
1451 """Profile diff of working directory changes"""
1452 opts = _byteskwargs(opts)
1452 opts = _byteskwargs(opts)
1453 timer, fm = gettimer(ui, opts)
1453 timer, fm = gettimer(ui, opts)
1454 options = {
1454 options = {
1455 'w': 'ignore_all_space',
1455 'w': 'ignore_all_space',
1456 'b': 'ignore_space_change',
1456 'b': 'ignore_space_change',
1457 'B': 'ignore_blank_lines',
1457 'B': 'ignore_blank_lines',
1458 }
1458 }
1459
1459
1460 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1460 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1461 opts = dict((options[c], b'1') for c in diffopt)
1461 opts = dict((options[c], b'1') for c in diffopt)
1462 def d():
1462 def d():
1463 ui.pushbuffer()
1463 ui.pushbuffer()
1464 commands.diff(ui, repo, **opts)
1464 commands.diff(ui, repo, **opts)
1465 ui.popbuffer()
1465 ui.popbuffer()
1466 diffopt = diffopt.encode('ascii')
1466 diffopt = diffopt.encode('ascii')
1467 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1467 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1468 timer(d, title=title)
1468 timer(d, title=title)
1469 fm.end()
1469 fm.end()
1470
1470
1471 @command(b'perfrevlogindex', revlogopts + formatteropts,
1471 @command(b'perfrevlogindex', revlogopts + formatteropts,
1472 b'-c|-m|FILE')
1472 b'-c|-m|FILE')
1473 def perfrevlogindex(ui, repo, file_=None, **opts):
1473 def perfrevlogindex(ui, repo, file_=None, **opts):
1474 """Benchmark operations against a revlog index.
1474 """Benchmark operations against a revlog index.
1475
1475
1476 This tests constructing a revlog instance, reading index data,
1476 This tests constructing a revlog instance, reading index data,
1477 parsing index data, and performing various operations related to
1477 parsing index data, and performing various operations related to
1478 index data.
1478 index data.
1479 """
1479 """
1480
1480
1481 opts = _byteskwargs(opts)
1481 opts = _byteskwargs(opts)
1482
1482
1483 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1483 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1484
1484
1485 opener = getattr(rl, 'opener') # trick linter
1485 opener = getattr(rl, 'opener') # trick linter
1486 indexfile = rl.indexfile
1486 indexfile = rl.indexfile
1487 data = opener.read(indexfile)
1487 data = opener.read(indexfile)
1488
1488
1489 header = struct.unpack(b'>I', data[0:4])[0]
1489 header = struct.unpack(b'>I', data[0:4])[0]
1490 version = header & 0xFFFF
1490 version = header & 0xFFFF
1491 if version == 1:
1491 if version == 1:
1492 revlogio = revlog.revlogio()
1492 revlogio = revlog.revlogio()
1493 inline = header & (1 << 16)
1493 inline = header & (1 << 16)
1494 else:
1494 else:
1495 raise error.Abort((b'unsupported revlog version: %d') % version)
1495 raise error.Abort((b'unsupported revlog version: %d') % version)
1496
1496
1497 rllen = len(rl)
1497 rllen = len(rl)
1498
1498
1499 node0 = rl.node(0)
1499 node0 = rl.node(0)
1500 node25 = rl.node(rllen // 4)
1500 node25 = rl.node(rllen // 4)
1501 node50 = rl.node(rllen // 2)
1501 node50 = rl.node(rllen // 2)
1502 node75 = rl.node(rllen // 4 * 3)
1502 node75 = rl.node(rllen // 4 * 3)
1503 node100 = rl.node(rllen - 1)
1503 node100 = rl.node(rllen - 1)
1504
1504
1505 allrevs = range(rllen)
1505 allrevs = range(rllen)
1506 allrevsrev = list(reversed(allrevs))
1506 allrevsrev = list(reversed(allrevs))
1507 allnodes = [rl.node(rev) for rev in range(rllen)]
1507 allnodes = [rl.node(rev) for rev in range(rllen)]
1508 allnodesrev = list(reversed(allnodes))
1508 allnodesrev = list(reversed(allnodes))
1509
1509
1510 def constructor():
1510 def constructor():
1511 revlog.revlog(opener, indexfile)
1511 revlog.revlog(opener, indexfile)
1512
1512
1513 def read():
1513 def read():
1514 with opener(indexfile) as fh:
1514 with opener(indexfile) as fh:
1515 fh.read()
1515 fh.read()
1516
1516
1517 def parseindex():
1517 def parseindex():
1518 revlogio.parseindex(data, inline)
1518 revlogio.parseindex(data, inline)
1519
1519
1520 def getentry(revornode):
1520 def getentry(revornode):
1521 index = revlogio.parseindex(data, inline)[0]
1521 index = revlogio.parseindex(data, inline)[0]
1522 index[revornode]
1522 index[revornode]
1523
1523
1524 def getentries(revs, count=1):
1524 def getentries(revs, count=1):
1525 index = revlogio.parseindex(data, inline)[0]
1525 index = revlogio.parseindex(data, inline)[0]
1526
1526
1527 for i in range(count):
1527 for i in range(count):
1528 for rev in revs:
1528 for rev in revs:
1529 index[rev]
1529 index[rev]
1530
1530
1531 def resolvenode(node):
1531 def resolvenode(node):
1532 nodemap = revlogio.parseindex(data, inline)[1]
1532 nodemap = revlogio.parseindex(data, inline)[1]
1533 # This only works for the C code.
1533 # This only works for the C code.
1534 if nodemap is None:
1534 if nodemap is None:
1535 return
1535 return
1536
1536
1537 try:
1537 try:
1538 nodemap[node]
1538 nodemap[node]
1539 except error.RevlogError:
1539 except error.RevlogError:
1540 pass
1540 pass
1541
1541
1542 def resolvenodes(nodes, count=1):
1542 def resolvenodes(nodes, count=1):
1543 nodemap = revlogio.parseindex(data, inline)[1]
1543 nodemap = revlogio.parseindex(data, inline)[1]
1544 if nodemap is None:
1544 if nodemap is None:
1545 return
1545 return
1546
1546
1547 for i in range(count):
1547 for i in range(count):
1548 for node in nodes:
1548 for node in nodes:
1549 try:
1549 try:
1550 nodemap[node]
1550 nodemap[node]
1551 except error.RevlogError:
1551 except error.RevlogError:
1552 pass
1552 pass
1553
1553
1554 benches = [
1554 benches = [
1555 (constructor, b'revlog constructor'),
1555 (constructor, b'revlog constructor'),
1556 (read, b'read'),
1556 (read, b'read'),
1557 (parseindex, b'create index object'),
1557 (parseindex, b'create index object'),
1558 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1558 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1559 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1559 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1560 (lambda: resolvenode(node0), b'look up node at rev 0'),
1560 (lambda: resolvenode(node0), b'look up node at rev 0'),
1561 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1561 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1562 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1562 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1563 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1563 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1564 (lambda: resolvenode(node100), b'look up node at tip'),
1564 (lambda: resolvenode(node100), b'look up node at tip'),
1565 # 2x variation is to measure caching impact.
1565 # 2x variation is to measure caching impact.
1566 (lambda: resolvenodes(allnodes),
1566 (lambda: resolvenodes(allnodes),
1567 b'look up all nodes (forward)'),
1567 b'look up all nodes (forward)'),
1568 (lambda: resolvenodes(allnodes, 2),
1568 (lambda: resolvenodes(allnodes, 2),
1569 b'look up all nodes 2x (forward)'),
1569 b'look up all nodes 2x (forward)'),
1570 (lambda: resolvenodes(allnodesrev),
1570 (lambda: resolvenodes(allnodesrev),
1571 b'look up all nodes (reverse)'),
1571 b'look up all nodes (reverse)'),
1572 (lambda: resolvenodes(allnodesrev, 2),
1572 (lambda: resolvenodes(allnodesrev, 2),
1573 b'look up all nodes 2x (reverse)'),
1573 b'look up all nodes 2x (reverse)'),
1574 (lambda: getentries(allrevs),
1574 (lambda: getentries(allrevs),
1575 b'retrieve all index entries (forward)'),
1575 b'retrieve all index entries (forward)'),
1576 (lambda: getentries(allrevs, 2),
1576 (lambda: getentries(allrevs, 2),
1577 b'retrieve all index entries 2x (forward)'),
1577 b'retrieve all index entries 2x (forward)'),
1578 (lambda: getentries(allrevsrev),
1578 (lambda: getentries(allrevsrev),
1579 b'retrieve all index entries (reverse)'),
1579 b'retrieve all index entries (reverse)'),
1580 (lambda: getentries(allrevsrev, 2),
1580 (lambda: getentries(allrevsrev, 2),
1581 b'retrieve all index entries 2x (reverse)'),
1581 b'retrieve all index entries 2x (reverse)'),
1582 ]
1582 ]
1583
1583
1584 for fn, title in benches:
1584 for fn, title in benches:
1585 timer, fm = gettimer(ui, opts)
1585 timer, fm = gettimer(ui, opts)
1586 timer(fn, title=title)
1586 timer(fn, title=title)
1587 fm.end()
1587 fm.end()
1588
1588
1589 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1589 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1590 [(b'd', b'dist', 100, b'distance between the revisions'),
1590 [(b'd', b'dist', 100, b'distance between the revisions'),
1591 (b's', b'startrev', 0, b'revision to start reading at'),
1591 (b's', b'startrev', 0, b'revision to start reading at'),
1592 (b'', b'reverse', False, b'read in reverse')],
1592 (b'', b'reverse', False, b'read in reverse')],
1593 b'-c|-m|FILE')
1593 b'-c|-m|FILE')
1594 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1594 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1595 **opts):
1595 **opts):
1596 """Benchmark reading a series of revisions from a revlog.
1596 """Benchmark reading a series of revisions from a revlog.
1597
1597
1598 By default, we read every ``-d/--dist`` revision from 0 to tip of
1598 By default, we read every ``-d/--dist`` revision from 0 to tip of
1599 the specified revlog.
1599 the specified revlog.
1600
1600
1601 The start revision can be defined via ``-s/--startrev``.
1601 The start revision can be defined via ``-s/--startrev``.
1602 """
1602 """
1603 opts = _byteskwargs(opts)
1603 opts = _byteskwargs(opts)
1604
1604
1605 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1605 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1606 rllen = getlen(ui)(rl)
1606 rllen = getlen(ui)(rl)
1607
1607
1608 if startrev < 0:
1608 if startrev < 0:
1609 startrev = rllen + startrev
1609 startrev = rllen + startrev
1610
1610
1611 def d():
1611 def d():
1612 rl.clearcaches()
1612 rl.clearcaches()
1613
1613
1614 beginrev = startrev
1614 beginrev = startrev
1615 endrev = rllen
1615 endrev = rllen
1616 dist = opts[b'dist']
1616 dist = opts[b'dist']
1617
1617
1618 if reverse:
1618 if reverse:
1619 beginrev, endrev = endrev - 1, beginrev - 1
1619 beginrev, endrev = endrev - 1, beginrev - 1
1620 dist = -1 * dist
1620 dist = -1 * dist
1621
1621
1622 for x in _xrange(beginrev, endrev, dist):
1622 for x in _xrange(beginrev, endrev, dist):
1623 # Old revisions don't support passing int.
1623 # Old revisions don't support passing int.
1624 n = rl.node(x)
1624 n = rl.node(x)
1625 rl.revision(n)
1625 rl.revision(n)
1626
1626
1627 timer, fm = gettimer(ui, opts)
1627 timer, fm = gettimer(ui, opts)
1628 timer(d)
1628 timer(d)
1629 fm.end()
1629 fm.end()
1630
1630
1631 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1631 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1632 [(b's', b'startrev', 1000, b'revision to start writing at'),
1632 [(b's', b'startrev', 1000, b'revision to start writing at'),
1633 (b'', b'stoprev', -1, b'last revision to write'),
1633 (b'', b'stoprev', -1, b'last revision to write'),
1634 (b'', b'count', 3, b'number of passes to perform'),
1634 (b'', b'count', 3, b'number of passes to perform'),
1635 (b'', b'details', False, b'print timing for every revision tested'),
1635 (b'', b'details', False, b'print timing for every revision tested'),
1636 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1636 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1637 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1637 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1638 ],
1638 ],
1639 b'-c|-m|FILE')
1639 b'-c|-m|FILE')
1640 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1640 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1641 """Benchmark writing a series of revisions to a revlog.
1641 """Benchmark writing a series of revisions to a revlog.
1642
1642
1643 Possible source values are:
1643 Possible source values are:
1644 * `full`: add from a full text (default).
1644 * `full`: add from a full text (default).
1645 * `parent-1`: add from a delta to the first parent
1645 * `parent-1`: add from a delta to the first parent
1646 * `parent-2`: add from a delta to the second parent if it exists
1646 * `parent-2`: add from a delta to the second parent if it exists
1647 (use a delta from the first parent otherwise)
1647 (use a delta from the first parent otherwise)
1648 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1648 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1649 * `storage`: add from the existing precomputed deltas
1649 * `storage`: add from the existing precomputed deltas
1650 """
1650 """
1651 opts = _byteskwargs(opts)
1651 opts = _byteskwargs(opts)
1652
1652
1653 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1653 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1654 rllen = getlen(ui)(rl)
1654 rllen = getlen(ui)(rl)
1655 if startrev < 0:
1655 if startrev < 0:
1656 startrev = rllen + startrev
1656 startrev = rllen + startrev
1657 if stoprev < 0:
1657 if stoprev < 0:
1658 stoprev = rllen + stoprev
1658 stoprev = rllen + stoprev
1659
1659
1660 lazydeltabase = opts['lazydeltabase']
1660 lazydeltabase = opts['lazydeltabase']
1661 source = opts['source']
1661 source = opts['source']
1662 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1662 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1663 b'storage')
1663 b'storage')
1664 if source not in validsource:
1664 if source not in validsource:
1665 raise error.Abort('invalid source type: %s' % source)
1665 raise error.Abort('invalid source type: %s' % source)
1666
1666
1667 ### actually gather results
1667 ### actually gather results
1668 count = opts['count']
1668 count = opts['count']
1669 if count <= 0:
1669 if count <= 0:
1670 raise error.Abort('invalid run count: %d' % count)
1670 raise error.Abort('invalid run count: %d' % count)
1671 allresults = []
1671 allresults = []
1672 for c in range(count):
1672 for c in range(count):
1673 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1673 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1674 lazydeltabase=lazydeltabase)
1674 lazydeltabase=lazydeltabase)
1675 allresults.append(timing)
1675 allresults.append(timing)
1676
1676
1677 ### consolidate the results in a single list
1677 ### consolidate the results in a single list
1678 results = []
1678 results = []
1679 for idx, (rev, t) in enumerate(allresults[0]):
1679 for idx, (rev, t) in enumerate(allresults[0]):
1680 ts = [t]
1680 ts = [t]
1681 for other in allresults[1:]:
1681 for other in allresults[1:]:
1682 orev, ot = other[idx]
1682 orev, ot = other[idx]
1683 assert orev == rev
1683 assert orev == rev
1684 ts.append(ot)
1684 ts.append(ot)
1685 results.append((rev, ts))
1685 results.append((rev, ts))
1686 resultcount = len(results)
1686 resultcount = len(results)
1687
1687
1688 ### Compute and display relevant statistics
1688 ### Compute and display relevant statistics
1689
1689
1690 # get a formatter
1690 # get a formatter
1691 fm = ui.formatter(b'perf', opts)
1691 fm = ui.formatter(b'perf', opts)
1692 displayall = ui.configbool(b"perf", b"all-timing", False)
1692 displayall = ui.configbool(b"perf", b"all-timing", False)
1693
1693
1694 # print individual details if requested
1694 # print individual details if requested
1695 if opts['details']:
1695 if opts['details']:
1696 for idx, item in enumerate(results, 1):
1696 for idx, item in enumerate(results, 1):
1697 rev, data = item
1697 rev, data = item
1698 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1698 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1699 formatone(fm, data, title=title, displayall=displayall)
1699 formatone(fm, data, title=title, displayall=displayall)
1700
1700
1701 # sorts results by median time
1701 # sorts results by median time
1702 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1702 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1703 # list of (name, index) to display
1703 # list of (name, index) to display
1704 relevants = [
1704 relevants = [
1705 ("min", 0),
1705 ("min", 0),
1706 ("10%", resultcount * 10 // 100),
1706 ("10%", resultcount * 10 // 100),
1707 ("25%", resultcount * 25 // 100),
1707 ("25%", resultcount * 25 // 100),
1708 ("50%", resultcount * 70 // 100),
1708 ("50%", resultcount * 70 // 100),
1709 ("75%", resultcount * 75 // 100),
1709 ("75%", resultcount * 75 // 100),
1710 ("90%", resultcount * 90 // 100),
1710 ("90%", resultcount * 90 // 100),
1711 ("95%", resultcount * 95 // 100),
1711 ("95%", resultcount * 95 // 100),
1712 ("99%", resultcount * 99 // 100),
1712 ("99%", resultcount * 99 // 100),
1713 ("max", -1),
1713 ("max", -1),
1714 ]
1714 ]
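# For example, with resultcount == 200 the "90%" row reads
# results[200 * 90 // 100] == results[180]: the per-revision timing that 90% of
# the measured revisions complete faster than (results were sorted by median
# time above).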
1715 if not ui.quiet:
1715 if not ui.quiet:
1716 for name, idx in relevants:
1716 for name, idx in relevants:
1717 data = results[idx]
1717 data = results[idx]
1718 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1718 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1719 formatone(fm, data[1], title=title, displayall=displayall)
1719 formatone(fm, data[1], title=title, displayall=displayall)
1720
1720
1721 # XXX summing that many floats will not be very precise; we ignore this fact
1721 # XXX summing that many floats will not be very precise; we ignore this fact
1722 # for now
1722 # for now
1723 totaltime = []
1723 totaltime = []
1724 for item in allresults:
1724 for item in allresults:
1725 totaltime.append((sum(x[1][0] for x in item),
1725 totaltime.append((sum(x[1][0] for x in item),
1726 sum(x[1][1] for x in item),
1726 sum(x[1][1] for x in item),
1727 sum(x[1][2] for x in item),)
1727 sum(x[1][2] for x in item),)
1728 )
1728 )
1729 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1729 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1730 displayall=displayall)
1730 displayall=displayall)
1731 fm.end()
1731 fm.end()
1732
1732
1733 class _faketr(object):
1733 class _faketr(object):
1734 def add(s, x, y, z=None):
1734 def add(s, x, y, z=None):
1735 return None
1735 return None
1736
1736
1737 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1737 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1738 lazydeltabase=True):
1738 lazydeltabase=True):
1739 timings = []
1739 timings = []
1740 tr = _faketr()
1740 tr = _faketr()
1741 with _temprevlog(ui, orig, startrev) as dest:
1741 with _temprevlog(ui, orig, startrev) as dest:
1742 dest._lazydeltabase = lazydeltabase
1742 dest._lazydeltabase = lazydeltabase
1743 revs = list(orig.revs(startrev, stoprev))
1743 revs = list(orig.revs(startrev, stoprev))
1744 total = len(revs)
1744 total = len(revs)
1745 topic = 'adding'
1745 topic = 'adding'
1746 if runidx is not None:
1746 if runidx is not None:
1747 topic += ' (run #%d)' % runidx
1747 topic += ' (run #%d)' % runidx
1748 for idx, rev in enumerate(revs):
1748 for idx, rev in enumerate(revs):
1749 ui.progress(topic, idx, unit='revs', total=total)
1749 ui.progress(topic, idx, unit='revs', total=total)
1750 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1750 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1751 with timeone() as r:
1751 with timeone() as r:
1752 dest.addrawrevision(*addargs, **addkwargs)
1752 dest.addrawrevision(*addargs, **addkwargs)
1753 timings.append((rev, r[0]))
1753 timings.append((rev, r[0]))
1754 ui.progress(topic, total, unit='revs', total=total)
1754 ui.progress(topic, total, unit='revs', total=total)
1755 ui.progress(topic, None, unit='revs', total=total)
1755 ui.progress(topic, None, unit='revs', total=total)
1756 return timings
1756 return timings
1757
1757
1758 def _getrevisionseed(orig, rev, tr, source):
1758 def _getrevisionseed(orig, rev, tr, source):
1759 from mercurial.node import nullid
1759 from mercurial.node import nullid
1760
1760
1761 linkrev = orig.linkrev(rev)
1761 linkrev = orig.linkrev(rev)
1762 node = orig.node(rev)
1762 node = orig.node(rev)
1763 p1, p2 = orig.parents(node)
1763 p1, p2 = orig.parents(node)
1764 flags = orig.flags(rev)
1764 flags = orig.flags(rev)
1765 cachedelta = None
1765 cachedelta = None
1766 text = None
1766 text = None
1767
1767
1768 if source == b'full':
1768 if source == b'full':
1769 text = orig.revision(rev)
1769 text = orig.revision(rev)
1770 elif source == b'parent-1':
1770 elif source == b'parent-1':
1771 baserev = orig.rev(p1)
1771 baserev = orig.rev(p1)
1772 cachedelta = (baserev, orig.revdiff(p1, rev))
1772 cachedelta = (baserev, orig.revdiff(p1, rev))
1773 elif source == b'parent-2':
1773 elif source == b'parent-2':
1774 parent = p2
1774 parent = p2
1775 if p2 == nullid:
1775 if p2 == nullid:
1776 parent = p1
1776 parent = p1
1777 baserev = orig.rev(parent)
1777 baserev = orig.rev(parent)
1778 cachedelta = (baserev, orig.revdiff(parent, rev))
1778 cachedelta = (baserev, orig.revdiff(parent, rev))
1779 elif source == b'parent-smallest':
1779 elif source == b'parent-smallest':
1780 p1diff = orig.revdiff(p1, rev)
1780 p1diff = orig.revdiff(p1, rev)
1781 parent = p1
1781 parent = p1
1782 diff = p1diff
1782 diff = p1diff
1783 if p2 != nullid:
1783 if p2 != nullid:
1784 p2diff = orig.revdiff(p2, rev)
1784 p2diff = orig.revdiff(p2, rev)
1785 if len(p1diff) > len(p2diff):
1785 if len(p1diff) > len(p2diff):
1786 parent = p2
1786 parent = p2
1787 diff = p2diff
1787 diff = p2diff
1788 baserev = orig.rev(parent)
1788 baserev = orig.rev(parent)
1789 cachedelta = (baserev, diff)
1789 cachedelta = (baserev, diff)
1790 elif source == b'storage':
1790 elif source == b'storage':
1791 baserev = orig.deltaparent(rev)
1791 baserev = orig.deltaparent(rev)
1792 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1792 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1793
1793
1794 return ((text, tr, linkrev, p1, p2),
1794 return ((text, tr, linkrev, p1, p2),
1795 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1795 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1796
1796
1797 @contextlib.contextmanager
1797 @contextlib.contextmanager
1798 def _temprevlog(ui, orig, truncaterev):
1798 def _temprevlog(ui, orig, truncaterev):
1799 from mercurial import vfs as vfsmod
1799 from mercurial import vfs as vfsmod
1800
1800
1801 if orig._inline:
1801 if orig._inline:
1802 raise error.Abort('not supporting inline revlog (yet)')
1802 raise error.Abort('not supporting inline revlog (yet)')
1803
1803
1804 origindexpath = orig.opener.join(orig.indexfile)
1804 origindexpath = orig.opener.join(orig.indexfile)
1805 origdatapath = orig.opener.join(orig.datafile)
1805 origdatapath = orig.opener.join(orig.datafile)
1806 indexname = 'revlog.i'
1806 indexname = 'revlog.i'
1807 dataname = 'revlog.d'
1807 dataname = 'revlog.d'
1808
1808
1809 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1809 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1810 try:
1810 try:
1811 # copy the data file in a temporary directory
1811 # copy the data file in a temporary directory
1812 ui.debug('copying data in %s\n' % tmpdir)
1812 ui.debug('copying data in %s\n' % tmpdir)
1813 destindexpath = os.path.join(tmpdir, 'revlog.i')
1813 destindexpath = os.path.join(tmpdir, 'revlog.i')
1814 destdatapath = os.path.join(tmpdir, 'revlog.d')
1814 destdatapath = os.path.join(tmpdir, 'revlog.d')
1815 shutil.copyfile(origindexpath, destindexpath)
1815 shutil.copyfile(origindexpath, destindexpath)
1816 shutil.copyfile(origdatapath, destdatapath)
1816 shutil.copyfile(origdatapath, destdatapath)
1817
1817
1818 # remove the data we want to add again
1818 # remove the data we want to add again
1819 ui.debug('truncating data to be rewritten\n')
1819 ui.debug('truncating data to be rewritten\n')
1820 with open(destindexpath, 'ab') as index:
1820 with open(destindexpath, 'ab') as index:
1821 index.seek(0)
1821 index.seek(0)
1822 index.truncate(truncaterev * orig._io.size)
1822 index.truncate(truncaterev * orig._io.size)
1823 with open(destdatapath, 'ab') as data:
1823 with open(destdatapath, 'ab') as data:
1824 data.seek(0)
1824 data.seek(0)
1825 data.truncate(orig.start(truncaterev))
1825 data.truncate(orig.start(truncaterev))
1826
1826
1827 # instantiate a new revlog from the temporary copy
1827 # instantiate a new revlog from the temporary copy
1828 ui.debug('instantiating revlog from the truncated copy\n')
1828 ui.debug('instantiating revlog from the truncated copy\n')
1829 vfs = vfsmod.vfs(tmpdir)
1829 vfs = vfsmod.vfs(tmpdir)
1830 vfs.options = getattr(orig.opener, 'options', None)
1830 vfs.options = getattr(orig.opener, 'options', None)
1831
1831
1832 dest = revlog.revlog(vfs,
1832 dest = revlog.revlog(vfs,
1833 indexfile=indexname,
1833 indexfile=indexname,
1834 datafile=dataname)
1834 datafile=dataname)
1835 if dest._inline:
1835 if dest._inline:
1836 raise error.Abort('inline revlog is not supported (yet)')
1836 raise error.Abort('inline revlog is not supported (yet)')
1837 # make sure internals are initialized
1837 # make sure internals are initialized
1838 dest.revision(len(dest) - 1)
1838 dest.revision(len(dest) - 1)
1839 yield dest
1839 yield dest
1840 del dest, vfs
1840 del dest, vfs
1841 finally:
1841 finally:
1842 shutil.rmtree(tmpdir, True)
1842 shutil.rmtree(tmpdir, True)
1843
1843
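# A minimal usage sketch for the context manager above (illustrative only; it
# assumes `ui`, a non-inline revlog `orig`, and a truncation revision
# `startrev` are already in scope, as in the callers earlier in this file):
#
#     with _temprevlog(ui, orig, startrev) as dest:
#         # `dest` is a throw-away copy truncated to `startrev`; revisions
#         # >= startrev can be re-added to it for timing, and the whole
#         # temporary directory is removed when the block exits.
#         ...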
1844 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1844 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1845 [(b'e', b'engines', b'', b'compression engines to use'),
1845 [(b'e', b'engines', b'', b'compression engines to use'),
1846 (b's', b'startrev', 0, b'revision to start at')],
1846 (b's', b'startrev', 0, b'revision to start at')],
1847 b'-c|-m|FILE')
1847 b'-c|-m|FILE')
1848 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1848 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1849 """Benchmark operations on revlog chunks.
1849 """Benchmark operations on revlog chunks.
1850
1850
1851 Logically, each revlog is a collection of fulltext revisions. However,
1851 Logically, each revlog is a collection of fulltext revisions. However,
1852 stored within each revlog are "chunks" of possibly compressed data. This
1852 stored within each revlog are "chunks" of possibly compressed data. This
1853 data needs to be read and decompressed or compressed and written.
1853 data needs to be read and decompressed or compressed and written.
1854
1854
1855 This command measures the time it takes to read+decompress and recompress
1855 This command measures the time it takes to read+decompress and recompress
1856 chunks in a revlog. It effectively isolates I/O and compression performance.
1856 chunks in a revlog. It effectively isolates I/O and compression performance.
1857 For measurements of higher-level operations like resolving revisions,
1857 For measurements of higher-level operations like resolving revisions,
1858 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1858 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1859 """
1859 """
1860 opts = _byteskwargs(opts)
1860 opts = _byteskwargs(opts)
1861
1861
1862 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1862 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1863
1863
1864 # _chunkraw was renamed to _getsegmentforrevs.
1864 # _chunkraw was renamed to _getsegmentforrevs.
1865 try:
1865 try:
1866 segmentforrevs = rl._getsegmentforrevs
1866 segmentforrevs = rl._getsegmentforrevs
1867 except AttributeError:
1867 except AttributeError:
1868 segmentforrevs = rl._chunkraw
1868 segmentforrevs = rl._chunkraw
1869
1869
1870 # Verify engines argument.
1870 # Verify engines argument.
1871 if engines:
1871 if engines:
1872 engines = set(e.strip() for e in engines.split(b','))
1872 engines = set(e.strip() for e in engines.split(b','))
1873 for engine in engines:
1873 for engine in engines:
1874 try:
1874 try:
1875 util.compressionengines[engine]
1875 util.compressionengines[engine]
1876 except KeyError:
1876 except KeyError:
1877 raise error.Abort(b'unknown compression engine: %s' % engine)
1877 raise error.Abort(b'unknown compression engine: %s' % engine)
1878 else:
1878 else:
1879 engines = []
1879 engines = []
1880 for e in util.compengines:
1880 for e in util.compengines:
1881 engine = util.compengines[e]
1881 engine = util.compengines[e]
1882 try:
1882 try:
1883 if engine.available():
1883 if engine.available():
1884 engine.revlogcompressor().compress(b'dummy')
1884 engine.revlogcompressor().compress(b'dummy')
1885 engines.append(e)
1885 engines.append(e)
1886 except NotImplementedError:
1886 except NotImplementedError:
1887 pass
1887 pass
1888
1888
1889 revs = list(rl.revs(startrev, len(rl) - 1))
1889 revs = list(rl.revs(startrev, len(rl) - 1))
1890
1890
1891 def rlfh(rl):
1891 def rlfh(rl):
1892 if rl._inline:
1892 if rl._inline:
1893 return getsvfs(repo)(rl.indexfile)
1893 return getsvfs(repo)(rl.indexfile)
1894 else:
1894 else:
1895 return getsvfs(repo)(rl.datafile)
1895 return getsvfs(repo)(rl.datafile)
1896
1896
1897 def doread():
1897 def doread():
1898 rl.clearcaches()
1898 rl.clearcaches()
1899 for rev in revs:
1899 for rev in revs:
1900 segmentforrevs(rev, rev)
1900 segmentforrevs(rev, rev)
1901
1901
1902 def doreadcachedfh():
1902 def doreadcachedfh():
1903 rl.clearcaches()
1903 rl.clearcaches()
1904 fh = rlfh(rl)
1904 fh = rlfh(rl)
1905 for rev in revs:
1905 for rev in revs:
1906 segmentforrevs(rev, rev, df=fh)
1906 segmentforrevs(rev, rev, df=fh)
1907
1907
1908 def doreadbatch():
1908 def doreadbatch():
1909 rl.clearcaches()
1909 rl.clearcaches()
1910 segmentforrevs(revs[0], revs[-1])
1910 segmentforrevs(revs[0], revs[-1])
1911
1911
1912 def doreadbatchcachedfh():
1912 def doreadbatchcachedfh():
1913 rl.clearcaches()
1913 rl.clearcaches()
1914 fh = rlfh(rl)
1914 fh = rlfh(rl)
1915 segmentforrevs(revs[0], revs[-1], df=fh)
1915 segmentforrevs(revs[0], revs[-1], df=fh)
1916
1916
1917 def dochunk():
1917 def dochunk():
1918 rl.clearcaches()
1918 rl.clearcaches()
1919 fh = rlfh(rl)
1919 fh = rlfh(rl)
1920 for rev in revs:
1920 for rev in revs:
1921 rl._chunk(rev, df=fh)
1921 rl._chunk(rev, df=fh)
1922
1922
1923 chunks = [None]
1923 chunks = [None]
1924
1924
1925 def dochunkbatch():
1925 def dochunkbatch():
1926 rl.clearcaches()
1926 rl.clearcaches()
1927 fh = rlfh(rl)
1927 fh = rlfh(rl)
1928 # Save chunks as a side-effect.
1928 # Save chunks as a side-effect.
1929 chunks[0] = rl._chunks(revs, df=fh)
1929 chunks[0] = rl._chunks(revs, df=fh)
1930
1930
1931 def docompress(compressor):
1931 def docompress(compressor):
1932 rl.clearcaches()
1932 rl.clearcaches()
1933
1933
1934 try:
1934 try:
1935 # Swap in the requested compression engine.
1935 # Swap in the requested compression engine.
1936 oldcompressor = rl._compressor
1936 oldcompressor = rl._compressor
1937 rl._compressor = compressor
1937 rl._compressor = compressor
1938 for chunk in chunks[0]:
1938 for chunk in chunks[0]:
1939 rl.compress(chunk)
1939 rl.compress(chunk)
1940 finally:
1940 finally:
1941 rl._compressor = oldcompressor
1941 rl._compressor = oldcompressor
1942
1942
1943 benches = [
1943 benches = [
1944 (lambda: doread(), b'read'),
1944 (lambda: doread(), b'read'),
1945 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1945 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1946 (lambda: doreadbatch(), b'read batch'),
1946 (lambda: doreadbatch(), b'read batch'),
1947 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1947 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1948 (lambda: dochunk(), b'chunk'),
1948 (lambda: dochunk(), b'chunk'),
1949 (lambda: dochunkbatch(), b'chunk batch'),
1949 (lambda: dochunkbatch(), b'chunk batch'),
1950 ]
1950 ]
1951
1951
1952 for engine in sorted(engines):
1952 for engine in sorted(engines):
1953 compressor = util.compengines[engine].revlogcompressor()
1953 compressor = util.compengines[engine].revlogcompressor()
1954 benches.append((functools.partial(docompress, compressor),
1954 benches.append((functools.partial(docompress, compressor),
1955 b'compress w/ %s' % engine))
1955 b'compress w/ %s' % engine))
1956
1956
1957 for fn, title in benches:
1957 for fn, title in benches:
1958 timer, fm = gettimer(ui, opts)
1958 timer, fm = gettimer(ui, opts)
1959 timer(fn, title=title)
1959 timer(fn, title=title)
1960 fm.end()
1960 fm.end()
1961
1961
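# Illustrative invocations of perfrevlogchunks (assuming perf.py is enabled
# as an extension, e.g. via --config extensions.perf=/path/to/perf.py):
#
#     $ hg perfrevlogchunks -c
#     $ hg perfrevlogchunks -m --startrev 1000 --engines zlib
#
# -c/-m select the changelog or manifest revlog (a tracked FILE also works),
# --startrev skips the oldest revisions, and --engines limits the compression
# benchmarks to the named engines from the option table above.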
1962 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1962 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1963 [(b'', b'cache', False, b'use caches instead of clearing')],
1963 [(b'', b'cache', False, b'use caches instead of clearing')],
1964 b'-c|-m|FILE REV')
1964 b'-c|-m|FILE REV')
1965 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1965 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1966 """Benchmark obtaining a revlog revision.
1966 """Benchmark obtaining a revlog revision.
1967
1967
1968 Obtaining a revlog revision consists of roughly the following steps:
1968 Obtaining a revlog revision consists of roughly the following steps:
1969
1969
1970 1. Compute the delta chain
1970 1. Compute the delta chain
1971 2. Slice the delta chain if applicable
1971 2. Slice the delta chain if applicable
1972 3. Obtain the raw chunks for that delta chain
1972 3. Obtain the raw chunks for that delta chain
1973 4. Decompress each raw chunk
1973 4. Decompress each raw chunk
1974 5. Apply binary patches to obtain fulltext
1974 5. Apply binary patches to obtain fulltext
1975 6. Verify hash of fulltext
1975 6. Verify hash of fulltext
1976
1976
1977 This command measures the time spent in each of these phases.
1977 This command measures the time spent in each of these phases.
1978 """
1978 """
1979 opts = _byteskwargs(opts)
1979 opts = _byteskwargs(opts)
1980
1980
1981 if opts.get(b'changelog') or opts.get(b'manifest'):
1981 if opts.get(b'changelog') or opts.get(b'manifest'):
1982 file_, rev = None, file_
1982 file_, rev = None, file_
1983 elif rev is None:
1983 elif rev is None:
1984 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1984 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1985
1985
1986 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1986 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1987
1987
1988 # _chunkraw was renamed to _getsegmentforrevs.
1988 # _chunkraw was renamed to _getsegmentforrevs.
1989 try:
1989 try:
1990 segmentforrevs = r._getsegmentforrevs
1990 segmentforrevs = r._getsegmentforrevs
1991 except AttributeError:
1991 except AttributeError:
1992 segmentforrevs = r._chunkraw
1992 segmentforrevs = r._chunkraw
1993
1993
1994 node = r.lookup(rev)
1994 node = r.lookup(rev)
1995 rev = r.rev(node)
1995 rev = r.rev(node)
1996
1996
1997 def getrawchunks(data, chain):
1997 def getrawchunks(data, chain):
1998 start = r.start
1998 start = r.start
1999 length = r.length
1999 length = r.length
2000 inline = r._inline
2000 inline = r._inline
2001 iosize = r._io.size
2001 iosize = r._io.size
2002 buffer = util.buffer
2002 buffer = util.buffer
2003
2003
2004 chunks = []
2004 chunks = []
2005 ladd = chunks.append
2005 ladd = chunks.append
2006 for idx, item in enumerate(chain):
2006 for idx, item in enumerate(chain):
2007 offset = start(item[0])
2007 offset = start(item[0])
2008 bits = data[idx]
2008 bits = data[idx]
2009 for rev in item:
2009 for rev in item:
2010 chunkstart = start(rev)
2010 chunkstart = start(rev)
2011 if inline:
2011 if inline:
2012 chunkstart += (rev + 1) * iosize
2012 chunkstart += (rev + 1) * iosize
2013 chunklength = length(rev)
2013 chunklength = length(rev)
2014 ladd(buffer(bits, chunkstart - offset, chunklength))
2014 ladd(buffer(bits, chunkstart - offset, chunklength))
2015
2015
2016 return chunks
2016 return chunks
2017
2017
2018 def dodeltachain(rev):
2018 def dodeltachain(rev):
2019 if not cache:
2019 if not cache:
2020 r.clearcaches()
2020 r.clearcaches()
2021 r._deltachain(rev)
2021 r._deltachain(rev)
2022
2022
2023 def doread(chain):
2023 def doread(chain):
2024 if not cache:
2024 if not cache:
2025 r.clearcaches()
2025 r.clearcaches()
2026 for item in slicedchain:
2026 for item in slicedchain:
2027 segmentforrevs(item[0], item[-1])
2027 segmentforrevs(item[0], item[-1])
2028
2028
2029 def doslice(r, chain, size):
2029 def doslice(r, chain, size):
2030 for s in slicechunk(r, chain, targetsize=size):
2030 for s in slicechunk(r, chain, targetsize=size):
2031 pass
2031 pass
2032
2032
2033 def dorawchunks(data, chain):
2033 def dorawchunks(data, chain):
2034 if not cache:
2034 if not cache:
2035 r.clearcaches()
2035 r.clearcaches()
2036 getrawchunks(data, chain)
2036 getrawchunks(data, chain)
2037
2037
2038 def dodecompress(chunks):
2038 def dodecompress(chunks):
2039 decomp = r.decompress
2039 decomp = r.decompress
2040 for chunk in chunks:
2040 for chunk in chunks:
2041 decomp(chunk)
2041 decomp(chunk)
2042
2042
2043 def dopatch(text, bins):
2043 def dopatch(text, bins):
2044 if not cache:
2044 if not cache:
2045 r.clearcaches()
2045 r.clearcaches()
2046 mdiff.patches(text, bins)
2046 mdiff.patches(text, bins)
2047
2047
2048 def dohash(text):
2048 def dohash(text):
2049 if not cache:
2049 if not cache:
2050 r.clearcaches()
2050 r.clearcaches()
2051 r.checkhash(text, node, rev=rev)
2051 r.checkhash(text, node, rev=rev)
2052
2052
2053 def dorevision():
2053 def dorevision():
2054 if not cache:
2054 if not cache:
2055 r.clearcaches()
2055 r.clearcaches()
2056 r.revision(node)
2056 r.revision(node)
2057
2057
2058 try:
2058 try:
2059 from mercurial.revlogutils.deltas import slicechunk
2059 from mercurial.revlogutils.deltas import slicechunk
2060 except ImportError:
2060 except ImportError:
2061 slicechunk = getattr(revlog, '_slicechunk', None)
2061 slicechunk = getattr(revlog, '_slicechunk', None)
2062
2062
2063 size = r.length(rev)
2063 size = r.length(rev)
2064 chain = r._deltachain(rev)[0]
2064 chain = r._deltachain(rev)[0]
2065 if not getattr(r, '_withsparseread', False):
2065 if not getattr(r, '_withsparseread', False):
2066 slicedchain = (chain,)
2066 slicedchain = (chain,)
2067 else:
2067 else:
2068 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2068 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2069 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2069 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2070 rawchunks = getrawchunks(data, slicedchain)
2070 rawchunks = getrawchunks(data, slicedchain)
2071 bins = r._chunks(chain)
2071 bins = r._chunks(chain)
2072 text = bytes(bins[0])
2072 text = bytes(bins[0])
2073 bins = bins[1:]
2073 bins = bins[1:]
2074 text = mdiff.patches(text, bins)
2074 text = mdiff.patches(text, bins)
2075
2075
2076 benches = [
2076 benches = [
2077 (lambda: dorevision(), b'full'),
2077 (lambda: dorevision(), b'full'),
2078 (lambda: dodeltachain(rev), b'deltachain'),
2078 (lambda: dodeltachain(rev), b'deltachain'),
2079 (lambda: doread(chain), b'read'),
2079 (lambda: doread(chain), b'read'),
2080 ]
2080 ]
2081
2081
2082 if getattr(r, '_withsparseread', False):
2082 if getattr(r, '_withsparseread', False):
2083 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2083 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2084 benches.append(slicing)
2084 benches.append(slicing)
2085
2085
2086 benches.extend([
2086 benches.extend([
2087 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2087 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2088 (lambda: dodecompress(rawchunks), b'decompress'),
2088 (lambda: dodecompress(rawchunks), b'decompress'),
2089 (lambda: dopatch(text, bins), b'patch'),
2089 (lambda: dopatch(text, bins), b'patch'),
2090 (lambda: dohash(text), b'hash'),
2090 (lambda: dohash(text), b'hash'),
2091 ])
2091 ])
2092
2092
2093 timer, fm = gettimer(ui, opts)
2093 timer, fm = gettimer(ui, opts)
2094 for fn, title in benches:
2094 for fn, title in benches:
2095 timer(fn, title=title)
2095 timer(fn, title=title)
2096 fm.end()
2096 fm.end()
2097
2097
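# For example, to time the phases listed in the docstring above on a large
# repository (assuming perf.py is enabled as an extension; 50000 is an
# arbitrary manifest revision):
#
#     $ hg perfrevlogrevision -m 50000
#     $ hg perfrevlogrevision --cache -m 50000
#
# The second form reuses the revlog caches between runs instead of clearing
# them, which separates pure computation from cache-rebuilding cost.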
2098 @command(b'perfrevset',
2098 @command(b'perfrevset',
2099 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2099 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2100 (b'', b'contexts', False, b'obtain changectx for each revision')]
2100 (b'', b'contexts', False, b'obtain changectx for each revision')]
2101 + formatteropts, b"REVSET")
2101 + formatteropts, b"REVSET")
2102 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2102 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2103 """benchmark the execution time of a revset
2103 """benchmark the execution time of a revset
2104
2104
2105 Use the --clear option if you need to evaluate the impact of building the
2105 Use the --clear option if you need to evaluate the impact of building the
2106 volatile revision set caches on revset execution. The volatile caches hold
2106 volatile revision set caches on revset execution. The volatile caches hold
2107 data related to filtering and obsolescence."""
2107 data related to filtering and obsolescence."""
2108 opts = _byteskwargs(opts)
2108 opts = _byteskwargs(opts)
2109
2109
2110 timer, fm = gettimer(ui, opts)
2110 timer, fm = gettimer(ui, opts)
2111 def d():
2111 def d():
2112 if clear:
2112 if clear:
2113 repo.invalidatevolatilesets()
2113 repo.invalidatevolatilesets()
2114 if contexts:
2114 if contexts:
2115 for ctx in repo.set(expr): pass
2115 for ctx in repo.set(expr): pass
2116 else:
2116 else:
2117 for r in repo.revs(expr): pass
2117 for r in repo.revs(expr): pass
2118 timer(d)
2118 timer(d)
2119 fm.end()
2119 fm.end()
2120
2120
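# Example revset benchmarks (assuming perf.py is enabled as an extension;
# the revsets themselves are illustrative):
#
#     $ hg perfrevset 'draft()'
#     $ hg perfrevset --clear 'heads(all())'
#     $ hg perfrevset --contexts 'author(alice)'
#
# --clear drops the volatile revision set caches before each run, and
# --contexts additionally builds a changectx for every matched revision.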
2121 @command(b'perfvolatilesets',
2121 @command(b'perfvolatilesets',
2122 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2122 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2123 ] + formatteropts)
2123 ] + formatteropts)
2124 def perfvolatilesets(ui, repo, *names, **opts):
2124 def perfvolatilesets(ui, repo, *names, **opts):
2125 """benchmark the computation of various volatile set
2125 """benchmark the computation of various volatile set
2126
2126
2127 Volatile set computes element related to filtering and obsolescence."""
2127 Volatile set computes element related to filtering and obsolescence."""
2128 opts = _byteskwargs(opts)
2128 opts = _byteskwargs(opts)
2129 timer, fm = gettimer(ui, opts)
2129 timer, fm = gettimer(ui, opts)
2130 repo = repo.unfiltered()
2130 repo = repo.unfiltered()
2131
2131
2132 def getobs(name):
2132 def getobs(name):
2133 def d():
2133 def d():
2134 repo.invalidatevolatilesets()
2134 repo.invalidatevolatilesets()
2135 if opts[b'clear_obsstore']:
2135 if opts[b'clear_obsstore']:
2136 clearfilecache(repo, b'obsstore')
2136 clearfilecache(repo, b'obsstore')
2137 obsolete.getrevs(repo, name)
2137 obsolete.getrevs(repo, name)
2138 return d
2138 return d
2139
2139
2140 allobs = sorted(obsolete.cachefuncs)
2140 allobs = sorted(obsolete.cachefuncs)
2141 if names:
2141 if names:
2142 allobs = [n for n in allobs if n in names]
2142 allobs = [n for n in allobs if n in names]
2143
2143
2144 for name in allobs:
2144 for name in allobs:
2145 timer(getobs(name), title=name)
2145 timer(getobs(name), title=name)
2146
2146
2147 def getfiltered(name):
2147 def getfiltered(name):
2148 def d():
2148 def d():
2149 repo.invalidatevolatilesets()
2149 repo.invalidatevolatilesets()
2150 if opts[b'clear_obsstore']:
2150 if opts[b'clear_obsstore']:
2151 clearfilecache(repo, b'obsstore')
2151 clearfilecache(repo, b'obsstore')
2152 repoview.filterrevs(repo, name)
2152 repoview.filterrevs(repo, name)
2153 return d
2153 return d
2154
2154
2155 allfilter = sorted(repoview.filtertable)
2155 allfilter = sorted(repoview.filtertable)
2156 if names:
2156 if names:
2157 allfilter = [n for n in allfilter if n in names]
2157 allfilter = [n for n in allfilter if n in names]
2158
2158
2159 for name in allfilter:
2159 for name in allfilter:
2160 timer(getfiltered(name), title=name)
2160 timer(getfiltered(name), title=name)
2161 fm.end()
2161 fm.end()
2162
2162
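# Example (assuming perf.py is enabled as an extension): time only the
# 'obsolete' volatile set, dropping the obsstore between runs:
#
#     $ hg perfvolatilesets --clear-obsstore obsolete
#
# With no positional names, every known volatile set and every repoview
# filter is timed in turn, matching the two loops above.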
2163 @command(b'perfbranchmap',
2163 @command(b'perfbranchmap',
2164 [(b'f', b'full', False,
2164 [(b'f', b'full', False,
2165 b'include the build time of subsets'),
2165 b'include the build time of subsets'),
2166 (b'', b'clear-revbranch', False,
2166 (b'', b'clear-revbranch', False,
2167 b'purge the revbranch cache between computation'),
2167 b'purge the revbranch cache between computation'),
2168 ] + formatteropts)
2168 ] + formatteropts)
2169 def perfbranchmap(ui, repo, *filternames, **opts):
2169 def perfbranchmap(ui, repo, *filternames, **opts):
2170 """benchmark the update of a branchmap
2170 """benchmark the update of a branchmap
2171
2171
2172 This benchmarks the full repo.branchmap() call with read and write disabled
2172 This benchmarks the full repo.branchmap() call with read and write disabled
2173 """
2173 """
2174 opts = _byteskwargs(opts)
2174 opts = _byteskwargs(opts)
2175 full = opts.get(b"full", False)
2175 full = opts.get(b"full", False)
2176 clear_revbranch = opts.get(b"clear_revbranch", False)
2176 clear_revbranch = opts.get(b"clear_revbranch", False)
2177 timer, fm = gettimer(ui, opts)
2177 timer, fm = gettimer(ui, opts)
2178 def getbranchmap(filtername):
2178 def getbranchmap(filtername):
2179 """generate a benchmark function for the filtername"""
2179 """generate a benchmark function for the filtername"""
2180 if filtername is None:
2180 if filtername is None:
2181 view = repo
2181 view = repo
2182 else:
2182 else:
2183 view = repo.filtered(filtername)
2183 view = repo.filtered(filtername)
2184 def d():
2184 def d():
2185 if clear_revbranch:
2185 if clear_revbranch:
2186 repo.revbranchcache()._clear()
2186 repo.revbranchcache()._clear()
2187 if full:
2187 if full:
2188 view._branchcaches.clear()
2188 view._branchcaches.clear()
2189 else:
2189 else:
2190 view._branchcaches.pop(filtername, None)
2190 view._branchcaches.pop(filtername, None)
2191 view.branchmap()
2191 view.branchmap()
2192 return d
2192 return d
2193 # order the filters from smaller subsets to bigger ones
2193 # order the filters from smaller subsets to bigger ones
2194 possiblefilters = set(repoview.filtertable)
2194 possiblefilters = set(repoview.filtertable)
2195 if filternames:
2195 if filternames:
2196 possiblefilters &= set(filternames)
2196 possiblefilters &= set(filternames)
2197 subsettable = getbranchmapsubsettable()
2197 subsettable = getbranchmapsubsettable()
2198 allfilters = []
2198 allfilters = []
2199 while possiblefilters:
2199 while possiblefilters:
2200 for name in possiblefilters:
2200 for name in possiblefilters:
2201 subset = subsettable.get(name)
2201 subset = subsettable.get(name)
2202 if subset not in possiblefilters:
2202 if subset not in possiblefilters:
2203 break
2203 break
2204 else:
2204 else:
2205 assert False, b'subset cycle %s!' % possiblefilters
2205 assert False, b'subset cycle %s!' % possiblefilters
2206 allfilters.append(name)
2206 allfilters.append(name)
2207 possiblefilters.remove(name)
2207 possiblefilters.remove(name)
2208
2208
2209 # warm the cache
2209 # warm the cache
2210 if not full:
2210 if not full:
2211 for name in allfilters:
2211 for name in allfilters:
2212 repo.filtered(name).branchmap()
2212 repo.filtered(name).branchmap()
2213 if not filternames or b'unfiltered' in filternames:
2213 if not filternames or b'unfiltered' in filternames:
2214 # add unfiltered
2214 # add unfiltered
2215 allfilters.append(None)
2215 allfilters.append(None)
2216
2216
2217 branchcacheread = safeattrsetter(branchmap, b'read')
2217 branchcacheread = safeattrsetter(branchmap, b'read')
2218 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2218 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2219 branchcacheread.set(lambda repo: None)
2219 branchcacheread.set(lambda repo: None)
2220 branchcachewrite.set(lambda bc, repo: None)
2220 branchcachewrite.set(lambda bc, repo: None)
2221 try:
2221 try:
2222 for name in allfilters:
2222 for name in allfilters:
2223 printname = name
2223 printname = name
2224 if name is None:
2224 if name is None:
2225 printname = b'unfiltered'
2225 printname = b'unfiltered'
2226 timer(getbranchmap(name), title=str(printname))
2226 timer(getbranchmap(name), title=str(printname))
2227 finally:
2227 finally:
2228 branchcacheread.restore()
2228 branchcacheread.restore()
2229 branchcachewrite.restore()
2229 branchcachewrite.restore()
2230 fm.end()
2230 fm.end()
2231
2231
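# Example (assuming perf.py is enabled as an extension): benchmark branchmap
# updates for the 'visible' and 'served' repoviews, rebuilding from scratch
# and purging the revbranch cache between runs:
#
#     $ hg perfbranchmap --full --clear-revbranch visible served
#
# Filter names come from repoview.filtertable; with no names, every filter
# plus the unfiltered view is benchmarked.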
2232 @command(b'perfbranchmapload', [
2232 @command(b'perfbranchmapload', [
2233 (b'f', b'filter', b'', b'Specify repoview filter'),
2233 (b'f', b'filter', b'', b'Specify repoview filter'),
2234 (b'', b'list', False, b'List branchmap filter caches'),
2234 (b'', b'list', False, b'List branchmap filter caches'),
2235 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2236
2235 ] + formatteropts)
2237 ] + formatteropts)
2236 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2238 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2237 """benchmark reading the branchmap"""
2239 """benchmark reading the branchmap"""
2238 opts = _byteskwargs(opts)
2240 opts = _byteskwargs(opts)
2241 clearrevlogs = opts[b'clear_revlogs']
2239
2242
2240 if list:
2243 if list:
2241 for name, kind, st in repo.cachevfs.readdir(stat=True):
2244 for name, kind, st in repo.cachevfs.readdir(stat=True):
2242 if name.startswith(b'branch2'):
2245 if name.startswith(b'branch2'):
2243 filtername = name.partition(b'-')[2] or b'unfiltered'
2246 filtername = name.partition(b'-')[2] or b'unfiltered'
2244 ui.status(b'%s - %s\n'
2247 ui.status(b'%s - %s\n'
2245 % (filtername, util.bytecount(st.st_size)))
2248 % (filtername, util.bytecount(st.st_size)))
2246 return
2249 return
2247 if filter:
2250 if filter:
2248 repo = repoview.repoview(repo, filter)
2251 repo = repoview.repoview(repo, filter)
2249 else:
2252 else:
2250 repo = repo.unfiltered()
2253 repo = repo.unfiltered()
2251 # try once without timer, the filter may not be cached
2254 # try once without timer, the filter may not be cached
2252 if branchmap.read(repo) is None:
2255 if branchmap.read(repo) is None:
2253 raise error.Abort(b'No branchmap cached for %s repo'
2256 raise error.Abort(b'No branchmap cached for %s repo'
2254 % (filter or b'unfiltered'))
2257 % (filter or b'unfiltered'))
2255 timer, fm = gettimer(ui, opts)
2258 timer, fm = gettimer(ui, opts)
2259 def setup():
2260 if clearrevlogs:
2261 clearchangelog(repo)
2256 def bench():
2262 def bench():
2257 branchmap.read(repo)
2263 branchmap.read(repo)
2258 timer(bench)
2264 timer(bench, setup=setup)
2259 fm.end()
2265 fm.end()
2260
2266
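# Example (assuming perf.py is enabled as an extension): exercise the
# --clear-revlogs flag introduced by this change, which refreshes the
# changelog and manifest before the branchmap read is timed:
#
#     $ hg perfbranchmapload --list
#     $ hg perfbranchmapload -f visible --clear-revlogs
#
# --list only reports the existing branchmap cache files and their sizes;
# -f/--filter selects the repoview whose cached branchmap is read (the
# command aborts if no branchmap is cached for that view).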
2261 @command(b'perfloadmarkers')
2267 @command(b'perfloadmarkers')
2262 def perfloadmarkers(ui, repo):
2268 def perfloadmarkers(ui, repo):
2263 """benchmark the time to parse the on-disk markers for a repo
2269 """benchmark the time to parse the on-disk markers for a repo
2264
2270
2265 Result is the number of markers in the repo."""
2271 Result is the number of markers in the repo."""
2266 timer, fm = gettimer(ui)
2272 timer, fm = gettimer(ui)
2267 svfs = getsvfs(repo)
2273 svfs = getsvfs(repo)
2268 timer(lambda: len(obsolete.obsstore(svfs)))
2274 timer(lambda: len(obsolete.obsstore(svfs)))
2269 fm.end()
2275 fm.end()
2270
2276
2271 @command(b'perflrucachedict', formatteropts +
2277 @command(b'perflrucachedict', formatteropts +
2272 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2278 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2273 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2279 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2274 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2280 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2275 (b'', b'size', 4, b'size of cache'),
2281 (b'', b'size', 4, b'size of cache'),
2276 (b'', b'gets', 10000, b'number of key lookups'),
2282 (b'', b'gets', 10000, b'number of key lookups'),
2277 (b'', b'sets', 10000, b'number of key sets'),
2283 (b'', b'sets', 10000, b'number of key sets'),
2278 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2284 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2279 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2285 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2280 norepo=True)
2286 norepo=True)
2281 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2287 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2282 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2288 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2283 opts = _byteskwargs(opts)
2289 opts = _byteskwargs(opts)
2284
2290
2285 def doinit():
2291 def doinit():
2286 for i in _xrange(10000):
2292 for i in _xrange(10000):
2287 util.lrucachedict(size)
2293 util.lrucachedict(size)
2288
2294
2289 costrange = list(range(mincost, maxcost + 1))
2295 costrange = list(range(mincost, maxcost + 1))
2290
2296
2291 values = []
2297 values = []
2292 for i in _xrange(size):
2298 for i in _xrange(size):
2293 values.append(random.randint(0, _maxint))
2299 values.append(random.randint(0, _maxint))
2294
2300
2295 # Get mode fills the cache and tests raw lookup performance with no
2301 # Get mode fills the cache and tests raw lookup performance with no
2296 # eviction.
2302 # eviction.
2297 getseq = []
2303 getseq = []
2298 for i in _xrange(gets):
2304 for i in _xrange(gets):
2299 getseq.append(random.choice(values))
2305 getseq.append(random.choice(values))
2300
2306
2301 def dogets():
2307 def dogets():
2302 d = util.lrucachedict(size)
2308 d = util.lrucachedict(size)
2303 for v in values:
2309 for v in values:
2304 d[v] = v
2310 d[v] = v
2305 for key in getseq:
2311 for key in getseq:
2306 value = d[key]
2312 value = d[key]
2307 value # silence pyflakes warning
2313 value # silence pyflakes warning
2308
2314
2309 def dogetscost():
2315 def dogetscost():
2310 d = util.lrucachedict(size, maxcost=costlimit)
2316 d = util.lrucachedict(size, maxcost=costlimit)
2311 for i, v in enumerate(values):
2317 for i, v in enumerate(values):
2312 d.insert(v, v, cost=costs[i])
2318 d.insert(v, v, cost=costs[i])
2313 for key in getseq:
2319 for key in getseq:
2314 try:
2320 try:
2315 value = d[key]
2321 value = d[key]
2316 value # silence pyflakes warning
2322 value # silence pyflakes warning
2317 except KeyError:
2323 except KeyError:
2318 pass
2324 pass
2319
2325
2320 # Set mode tests insertion speed with cache eviction.
2326 # Set mode tests insertion speed with cache eviction.
2321 setseq = []
2327 setseq = []
2322 costs = []
2328 costs = []
2323 for i in _xrange(sets):
2329 for i in _xrange(sets):
2324 setseq.append(random.randint(0, _maxint))
2330 setseq.append(random.randint(0, _maxint))
2325 costs.append(random.choice(costrange))
2331 costs.append(random.choice(costrange))
2326
2332
2327 def doinserts():
2333 def doinserts():
2328 d = util.lrucachedict(size)
2334 d = util.lrucachedict(size)
2329 for v in setseq:
2335 for v in setseq:
2330 d.insert(v, v)
2336 d.insert(v, v)
2331
2337
2332 def doinsertscost():
2338 def doinsertscost():
2333 d = util.lrucachedict(size, maxcost=costlimit)
2339 d = util.lrucachedict(size, maxcost=costlimit)
2334 for i, v in enumerate(setseq):
2340 for i, v in enumerate(setseq):
2335 d.insert(v, v, cost=costs[i])
2341 d.insert(v, v, cost=costs[i])
2336
2342
2337 def dosets():
2343 def dosets():
2338 d = util.lrucachedict(size)
2344 d = util.lrucachedict(size)
2339 for v in setseq:
2345 for v in setseq:
2340 d[v] = v
2346 d[v] = v
2341
2347
2342 # Mixed mode randomly performs gets and sets with eviction.
2348 # Mixed mode randomly performs gets and sets with eviction.
2343 mixedops = []
2349 mixedops = []
2344 for i in _xrange(mixed):
2350 for i in _xrange(mixed):
2345 r = random.randint(0, 100)
2351 r = random.randint(0, 100)
2346 if r < mixedgetfreq:
2352 if r < mixedgetfreq:
2347 op = 0
2353 op = 0
2348 else:
2354 else:
2349 op = 1
2355 op = 1
2350
2356
2351 mixedops.append((op,
2357 mixedops.append((op,
2352 random.randint(0, size * 2),
2358 random.randint(0, size * 2),
2353 random.choice(costrange)))
2359 random.choice(costrange)))
2354
2360
2355 def domixed():
2361 def domixed():
2356 d = util.lrucachedict(size)
2362 d = util.lrucachedict(size)
2357
2363
2358 for op, v, cost in mixedops:
2364 for op, v, cost in mixedops:
2359 if op == 0:
2365 if op == 0:
2360 try:
2366 try:
2361 d[v]
2367 d[v]
2362 except KeyError:
2368 except KeyError:
2363 pass
2369 pass
2364 else:
2370 else:
2365 d[v] = v
2371 d[v] = v
2366
2372
2367 def domixedcost():
2373 def domixedcost():
2368 d = util.lrucachedict(size, maxcost=costlimit)
2374 d = util.lrucachedict(size, maxcost=costlimit)
2369
2375
2370 for op, v, cost in mixedops:
2376 for op, v, cost in mixedops:
2371 if op == 0:
2377 if op == 0:
2372 try:
2378 try:
2373 d[v]
2379 d[v]
2374 except KeyError:
2380 except KeyError:
2375 pass
2381 pass
2376 else:
2382 else:
2377 d.insert(v, v, cost=cost)
2383 d.insert(v, v, cost=cost)
2378
2384
2379 benches = [
2385 benches = [
2380 (doinit, b'init'),
2386 (doinit, b'init'),
2381 ]
2387 ]
2382
2388
2383 if costlimit:
2389 if costlimit:
2384 benches.extend([
2390 benches.extend([
2385 (dogetscost, b'gets w/ cost limit'),
2391 (dogetscost, b'gets w/ cost limit'),
2386 (doinsertscost, b'inserts w/ cost limit'),
2392 (doinsertscost, b'inserts w/ cost limit'),
2387 (domixedcost, b'mixed w/ cost limit'),
2393 (domixedcost, b'mixed w/ cost limit'),
2388 ])
2394 ])
2389 else:
2395 else:
2390 benches.extend([
2396 benches.extend([
2391 (dogets, b'gets'),
2397 (dogets, b'gets'),
2392 (doinserts, b'inserts'),
2398 (doinserts, b'inserts'),
2393 (dosets, b'sets'),
2399 (dosets, b'sets'),
2394 (domixed, b'mixed')
2400 (domixed, b'mixed')
2395 ])
2401 ])
2396
2402
2397 for fn, title in benches:
2403 for fn, title in benches:
2398 timer, fm = gettimer(ui, opts)
2404 timer, fm = gettimer(ui, opts)
2399 timer(fn, title=title)
2405 timer(fn, title=title)
2400 fm.end()
2406 fm.end()
2401
2407
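# Example (assuming perf.py is enabled; perflrucachedict is 'norepo', so it
# can run outside any repository):
#
#     $ hg perflrucachedict --size 4 --gets 1000000 --mixedgetfreq 90
#     $ hg perflrucachedict --size 4 --costlimit 500 --maxcost 100
#
# The first form exercises the plain size-bounded cache benchmarks; a
# non-zero --costlimit switches to the cost-aware variants defined above.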
2402 @command(b'perfwrite', formatteropts)
2408 @command(b'perfwrite', formatteropts)
2403 def perfwrite(ui, repo, **opts):
2409 def perfwrite(ui, repo, **opts):
2404 """microbenchmark ui.write
2410 """microbenchmark ui.write
2405 """
2411 """
2406 opts = _byteskwargs(opts)
2412 opts = _byteskwargs(opts)
2407
2413
2408 timer, fm = gettimer(ui, opts)
2414 timer, fm = gettimer(ui, opts)
2409 def write():
2415 def write():
2410 for i in range(100000):
2416 for i in range(100000):
2411 ui.write((b'Testing write performance\n'))
2417 ui.write((b'Testing write performance\n'))
2412 timer(write)
2418 timer(write)
2413 fm.end()
2419 fm.end()
2414
2420
2415 def uisetup(ui):
2421 def uisetup(ui):
2416 if (util.safehasattr(cmdutil, b'openrevlog') and
2422 if (util.safehasattr(cmdutil, b'openrevlog') and
2417 not util.safehasattr(commands, b'debugrevlogopts')):
2423 not util.safehasattr(commands, b'debugrevlogopts')):
2418 # for "historical portability":
2424 # for "historical portability":
2419 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2425 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2420 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2426 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2421 # openrevlog() should cause failure, because it has been
2427 # openrevlog() should cause failure, because it has been
2422 # available since 3.5 (or 49c583ca48c4).
2428 # available since 3.5 (or 49c583ca48c4).
2423 def openrevlog(orig, repo, cmd, file_, opts):
2429 def openrevlog(orig, repo, cmd, file_, opts):
2424 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2430 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2425 raise error.Abort(b"This version doesn't support --dir option",
2431 raise error.Abort(b"This version doesn't support --dir option",
2426 hint=b"use 3.5 or later")
2432 hint=b"use 3.5 or later")
2427 return orig(repo, cmd, file_, opts)
2433 return orig(repo, cmd, file_, opts)
2428 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2434 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)