perf: introduce a function to fully "unload" a changelog...
Boris Feld
r40737:e4ea6385 default
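
The new clearchangelog() helper (added next to the existing cache-clearing utilities below) drops the in-memory changelog of a repository and of its unfiltered view, so a benchmark can measure reloading it from disk. A minimal sketch of how a perf command could use it; the command name here is hypothetical and not part of this changeset:

    @command(b'perfexampleloadchangelog', formatteropts)
    def perfexampleloadchangelog(ui, repo, **opts):
        opts = _byteskwargs(opts)
        timer, fm = gettimer(ui, opts)
        def s():
            # drop any cached changelog so the timed call reloads it from disk
            clearchangelog(repo)
        def d():
            len(repo.changelog)
        timer(d, setup=s)
        fm.end()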
@@ -1,2422 +1,2428 b''
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass

def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
    (b'c', b'changelog', False, (b'open changelog')),
    (b'm', b'manifest', False, (b'open manifest')),
    (b'', b'dir', False, (b'open directory manifest')),
    ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm

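# Typical call pattern for the helpers above, as used by the perf commands
# later in this file:
#   timer, fm = gettimer(ui, opts)
#   timer(d, setup=s)    # 'setup' is optional and runs before every timed call
#   fm.end()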
def stub_timer(fm, func, setup=None, title=None):
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))

def _timer(fm, func, setup=None, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)

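# For reference, each line emitted by formatone() looks roughly like:
#   ! wall 0.000062 comb 0.000000 user 0.000000 sys 0.000000 (best of 100)
# with additional lines ending in '(max of N)', '(avg of N)' and
# '(median of N)' when perf.all-timing is enabled.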
# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()

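# A sketch of the intended use (the set() half of this appears in gettimer()
# above; the attribute names follow that example):
#   uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)    # point ui.fout at ui.ferr
#       ...
#       uifout.restore()       # put the original stream back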
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

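# The helper below (the addition in this changeset) fully "unloads" a
# repository's changelog: it resets the filtered view's cached changelog
# attributes (when given a filtered repo) and drops the 'changelog'
# filecache entry on the unfiltered repo, so the next access re-reads it
# from disk.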
def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perftags', formatteropts)
def perftags(ui, repo, **opts):
    import mercurial.changelog
    import mercurial.manifest

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def s():
        repo.changelog = mercurial.changelog.changelog(svfs)
        rootmanifest = mercurial.manifest.manifestrevlog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
                                                          rootmanifest)
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfbookmarks', formatteropts)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def s():
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

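# Example invocation (with this extension enabled; the bundle path is
# hypothetical):
#   hg perfbundleread path/to/some.hg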
708 @command(b'perfchangegroupchangelog', formatteropts +
714 @command(b'perfchangegroupchangelog', formatteropts +
709 [(b'', b'version', b'02', b'changegroup version'),
715 [(b'', b'version', b'02', b'changegroup version'),
710 (b'r', b'rev', b'', b'revisions to add to changegroup')])
716 (b'r', b'rev', b'', b'revisions to add to changegroup')])
711 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
717 def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
712 """Benchmark producing a changelog group for a changegroup.
718 """Benchmark producing a changelog group for a changegroup.
713
719
714 This measures the time spent processing the changelog during a
720 This measures the time spent processing the changelog during a
715 bundle operation. This occurs during `hg bundle` and on a server
721 bundle operation. This occurs during `hg bundle` and on a server
716 processing a `getbundle` wire protocol request (handles clones
722 processing a `getbundle` wire protocol request (handles clones
717 and pull requests).
723 and pull requests).
718
724
719 By default, all revisions are added to the changegroup.
725 By default, all revisions are added to the changegroup.
720 """
726 """
721 opts = _byteskwargs(opts)
727 opts = _byteskwargs(opts)
722 cl = repo.changelog
728 cl = repo.changelog
723 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
729 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
724 bundler = changegroup.getbundler(version, repo)
730 bundler = changegroup.getbundler(version, repo)
725
731
726 def d():
732 def d():
727 state, chunks = bundler._generatechangelog(cl, nodes)
733 state, chunks = bundler._generatechangelog(cl, nodes)
728 for chunk in chunks:
734 for chunk in chunks:
729 pass
735 pass
730
736
731 timer, fm = gettimer(ui, opts)
737 timer, fm = gettimer(ui, opts)
732
738
733 # Terminal printing can interfere with timing. So disable it.
739 # Terminal printing can interfere with timing. So disable it.
734 with ui.configoverride({(b'progress', b'disable'): True}):
740 with ui.configoverride({(b'progress', b'disable'): True}):
735 timer(d)
741 timer(d)
736
742
737 fm.end()
743 fm.end()
738
744
739 @command(b'perfdirs', formatteropts)
745 @command(b'perfdirs', formatteropts)
740 def perfdirs(ui, repo, **opts):
746 def perfdirs(ui, repo, **opts):
741 opts = _byteskwargs(opts)
747 opts = _byteskwargs(opts)
742 timer, fm = gettimer(ui, opts)
748 timer, fm = gettimer(ui, opts)
743 dirstate = repo.dirstate
749 dirstate = repo.dirstate
744 b'a' in dirstate
750 b'a' in dirstate
745 def d():
751 def d():
746 dirstate.hasdir(b'a')
752 dirstate.hasdir(b'a')
747 del dirstate._map._dirs
753 del dirstate._map._dirs
748 timer(d)
754 timer(d)
749 fm.end()
755 fm.end()
750
756
751 @command(b'perfdirstate', formatteropts)
757 @command(b'perfdirstate', formatteropts)
752 def perfdirstate(ui, repo, **opts):
758 def perfdirstate(ui, repo, **opts):
753 opts = _byteskwargs(opts)
759 opts = _byteskwargs(opts)
754 timer, fm = gettimer(ui, opts)
760 timer, fm = gettimer(ui, opts)
755 b"a" in repo.dirstate
761 b"a" in repo.dirstate
756 def d():
762 def d():
757 repo.dirstate.invalidate()
763 repo.dirstate.invalidate()
758 b"a" in repo.dirstate
764 b"a" in repo.dirstate
759 timer(d)
765 timer(d)
760 fm.end()
766 fm.end()
761
767
762 @command(b'perfdirstatedirs', formatteropts)
768 @command(b'perfdirstatedirs', formatteropts)
763 def perfdirstatedirs(ui, repo, **opts):
769 def perfdirstatedirs(ui, repo, **opts):
764 opts = _byteskwargs(opts)
770 opts = _byteskwargs(opts)
765 timer, fm = gettimer(ui, opts)
771 timer, fm = gettimer(ui, opts)
766 b"a" in repo.dirstate
772 b"a" in repo.dirstate
767 def d():
773 def d():
768 repo.dirstate.hasdir(b"a")
774 repo.dirstate.hasdir(b"a")
769 del repo.dirstate._map._dirs
775 del repo.dirstate._map._dirs
770 timer(d)
776 timer(d)
771 fm.end()
777 fm.end()
772
778
773 @command(b'perfdirstatefoldmap', formatteropts)
779 @command(b'perfdirstatefoldmap', formatteropts)
774 def perfdirstatefoldmap(ui, repo, **opts):
780 def perfdirstatefoldmap(ui, repo, **opts):
775 opts = _byteskwargs(opts)
781 opts = _byteskwargs(opts)
776 timer, fm = gettimer(ui, opts)
782 timer, fm = gettimer(ui, opts)
777 dirstate = repo.dirstate
783 dirstate = repo.dirstate
778 b'a' in dirstate
784 b'a' in dirstate
779 def d():
785 def d():
780 dirstate._map.filefoldmap.get(b'a')
786 dirstate._map.filefoldmap.get(b'a')
781 del dirstate._map.filefoldmap
787 del dirstate._map.filefoldmap
782 timer(d)
788 timer(d)
783 fm.end()
789 fm.end()
784
790
785 @command(b'perfdirfoldmap', formatteropts)
791 @command(b'perfdirfoldmap', formatteropts)
786 def perfdirfoldmap(ui, repo, **opts):
792 def perfdirfoldmap(ui, repo, **opts):
787 opts = _byteskwargs(opts)
793 opts = _byteskwargs(opts)
788 timer, fm = gettimer(ui, opts)
794 timer, fm = gettimer(ui, opts)
789 dirstate = repo.dirstate
795 dirstate = repo.dirstate
790 b'a' in dirstate
796 b'a' in dirstate
791 def d():
797 def d():
792 dirstate._map.dirfoldmap.get(b'a')
798 dirstate._map.dirfoldmap.get(b'a')
793 del dirstate._map.dirfoldmap
799 del dirstate._map.dirfoldmap
794 del dirstate._map._dirs
800 del dirstate._map._dirs
795 timer(d)
801 timer(d)
796 fm.end()
802 fm.end()
797
803
798 @command(b'perfdirstatewrite', formatteropts)
804 @command(b'perfdirstatewrite', formatteropts)
799 def perfdirstatewrite(ui, repo, **opts):
805 def perfdirstatewrite(ui, repo, **opts):
800 opts = _byteskwargs(opts)
806 opts = _byteskwargs(opts)
801 timer, fm = gettimer(ui, opts)
807 timer, fm = gettimer(ui, opts)
802 ds = repo.dirstate
808 ds = repo.dirstate
803 b"a" in ds
809 b"a" in ds
804 def d():
810 def d():
805 ds._dirty = True
811 ds._dirty = True
806 ds.write(repo.currenttransaction())
812 ds.write(repo.currenttransaction())
807 timer(d)
813 timer(d)
808 fm.end()
814 fm.end()
809
815
810 @command(b'perfmergecalculate',
816 @command(b'perfmergecalculate',
811 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
817 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
812 def perfmergecalculate(ui, repo, rev, **opts):
818 def perfmergecalculate(ui, repo, rev, **opts):
813 opts = _byteskwargs(opts)
819 opts = _byteskwargs(opts)
814 timer, fm = gettimer(ui, opts)
820 timer, fm = gettimer(ui, opts)
815 wctx = repo[None]
821 wctx = repo[None]
816 rctx = scmutil.revsingle(repo, rev, rev)
822 rctx = scmutil.revsingle(repo, rev, rev)
817 ancestor = wctx.ancestor(rctx)
823 ancestor = wctx.ancestor(rctx)
818 # we don't want working dir files to be stat'd in the benchmark, so prime
824 # we don't want working dir files to be stat'd in the benchmark, so prime
819 # that cache
825 # that cache
820 wctx.dirty()
826 wctx.dirty()
821 def d():
827 def d():
822 # acceptremote is True because we don't want prompts in the middle of
828 # acceptremote is True because we don't want prompts in the middle of
823 # our benchmark
829 # our benchmark
824 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
830 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
825 acceptremote=True, followcopies=True)
831 acceptremote=True, followcopies=True)
826 timer(d)
832 timer(d)
827 fm.end()
833 fm.end()
828
834
829 @command(b'perfpathcopies', [], b"REV REV")
835 @command(b'perfpathcopies', [], b"REV REV")
830 def perfpathcopies(ui, repo, rev1, rev2, **opts):
836 def perfpathcopies(ui, repo, rev1, rev2, **opts):
831 opts = _byteskwargs(opts)
837 opts = _byteskwargs(opts)
832 timer, fm = gettimer(ui, opts)
838 timer, fm = gettimer(ui, opts)
833 ctx1 = scmutil.revsingle(repo, rev1, rev1)
839 ctx1 = scmutil.revsingle(repo, rev1, rev1)
834 ctx2 = scmutil.revsingle(repo, rev2, rev2)
840 ctx2 = scmutil.revsingle(repo, rev2, rev2)
835 def d():
841 def d():
836 copies.pathcopies(ctx1, ctx2)
842 copies.pathcopies(ctx1, ctx2)
837 timer(d)
843 timer(d)
838 fm.end()
844 fm.end()
839
845
840 @command(b'perfphases',
846 @command(b'perfphases',
841 [(b'', b'full', False, b'include file reading time too'),
847 [(b'', b'full', False, b'include file reading time too'),
842 ], b"")
848 ], b"")
843 def perfphases(ui, repo, **opts):
849 def perfphases(ui, repo, **opts):
844 """benchmark phasesets computation"""
850 """benchmark phasesets computation"""
845 opts = _byteskwargs(opts)
851 opts = _byteskwargs(opts)
846 timer, fm = gettimer(ui, opts)
852 timer, fm = gettimer(ui, opts)
847 _phases = repo._phasecache
853 _phases = repo._phasecache
848 full = opts.get(b'full')
854 full = opts.get(b'full')
849 def d():
855 def d():
850 phases = _phases
856 phases = _phases
851 if full:
857 if full:
852 clearfilecache(repo, b'_phasecache')
858 clearfilecache(repo, b'_phasecache')
853 phases = repo._phasecache
859 phases = repo._phasecache
854 phases.invalidate()
860 phases.invalidate()
855 phases.loadphaserevs(repo)
861 phases.loadphaserevs(repo)
856 timer(d)
862 timer(d)
857 fm.end()
863 fm.end()
858
864
859 @command(b'perfphasesremote',
865 @command(b'perfphasesremote',
860 [], b"[DEST]")
866 [], b"[DEST]")
861 def perfphasesremote(ui, repo, dest=None, **opts):
867 def perfphasesremote(ui, repo, dest=None, **opts):
862 """benchmark time needed to analyse phases of the remote server"""
868 """benchmark time needed to analyse phases of the remote server"""
863 from mercurial.node import (
869 from mercurial.node import (
864 bin,
870 bin,
865 )
871 )
866 from mercurial import (
872 from mercurial import (
867 exchange,
873 exchange,
868 hg,
874 hg,
869 phases,
875 phases,
870 )
876 )
871 opts = _byteskwargs(opts)
877 opts = _byteskwargs(opts)
872 timer, fm = gettimer(ui, opts)
878 timer, fm = gettimer(ui, opts)
873
879
874 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
880 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
875 if not path:
881 if not path:
876 raise error.Abort((b'default repository not configured!'),
882 raise error.Abort((b'default repository not configured!'),
877 hint=(b"see 'hg help config.paths'"))
883 hint=(b"see 'hg help config.paths'"))
878 dest = path.pushloc or path.loc
884 dest = path.pushloc or path.loc
879 branches = (path.branch, opts.get(b'branch') or [])
885 branches = (path.branch, opts.get(b'branch') or [])
880 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
886 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
881 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
887 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
882 other = hg.peer(repo, opts, dest)
888 other = hg.peer(repo, opts, dest)
883
889
884 # easier to perform discovery through the operation
890 # easier to perform discovery through the operation
885 op = exchange.pushoperation(repo, other)
891 op = exchange.pushoperation(repo, other)
886 exchange._pushdiscoverychangeset(op)
892 exchange._pushdiscoverychangeset(op)
887
893
888 remotesubset = op.fallbackheads
894 remotesubset = op.fallbackheads
889
895
890 with other.commandexecutor() as e:
896 with other.commandexecutor() as e:
891 remotephases = e.callcommand(b'listkeys',
897 remotephases = e.callcommand(b'listkeys',
892 {b'namespace': b'phases'}).result()
898 {b'namespace': b'phases'}).result()
893 del other
899 del other
894 publishing = remotephases.get(b'publishing', False)
900 publishing = remotephases.get(b'publishing', False)
895 if publishing:
901 if publishing:
896 ui.status((b'publishing: yes\n'))
902 ui.status((b'publishing: yes\n'))
897 else:
903 else:
898 ui.status((b'publishing: no\n'))
904 ui.status((b'publishing: no\n'))
899
905
900 nodemap = repo.changelog.nodemap
906 nodemap = repo.changelog.nodemap
901 nonpublishroots = 0
907 nonpublishroots = 0
902 for nhex, phase in remotephases.iteritems():
908 for nhex, phase in remotephases.iteritems():
903 if nhex == b'publishing': # ignore data related to publish option
909 if nhex == b'publishing': # ignore data related to publish option
904 continue
910 continue
905 node = bin(nhex)
911 node = bin(nhex)
906 if node in nodemap and int(phase):
912 if node in nodemap and int(phase):
907 nonpublishroots += 1
913 nonpublishroots += 1
908 ui.status((b'number of roots: %d\n') % len(remotephases))
914 ui.status((b'number of roots: %d\n') % len(remotephases))
909 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
915 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
910 def d():
916 def d():
911 phases.remotephasessummary(repo,
917 phases.remotephasessummary(repo,
912 remotesubset,
918 remotesubset,
913 remotephases)
919 remotephases)
914 timer(d)
920 timer(d)
915 fm.end()
921 fm.end()
916
922
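# Illustrative sketch (not part of upstream perf.py, hypothetical helper): the
# core of the phase analysis benchmarked above, without the timing harness.
# Assumes `repo` is a local repository object and `dest` a resolvable peer
# location; the peer, listkeys and nodemap APIs are the same ones used in
# perfphasesremote.
def _sketchcountnonpublicroots(repo, dest):
    from mercurial import hg
    from mercurial.node import bin
    other = hg.peer(repo, {}, dest)
    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    nodemap = repo.changelog.nodemap
    count = 0
    for nhex, phase in remotephases.items():
        if nhex == b'publishing':  # metadata entry, not a phase root
            continue
        if bin(nhex) in nodemap and int(phase):
            count += 1
    return count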
917 @command(b'perfmanifest',[
923 @command(b'perfmanifest',[
918 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
924 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
919 (b'', b'clear-disk', False, b'clear on-disk caches too'),
925 (b'', b'clear-disk', False, b'clear on-disk caches too'),
920 ] + formatteropts, b'REV|NODE')
926 ] + formatteropts, b'REV|NODE')
921 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
927 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
922 """benchmark the time to read a manifest from disk and return a usable
928 """benchmark the time to read a manifest from disk and return a usable
923 dict-like object
929 dict-like object
924
930
925 Manifest caches are cleared before retrieval."""
931 Manifest caches are cleared before retrieval."""
926 opts = _byteskwargs(opts)
932 opts = _byteskwargs(opts)
927 timer, fm = gettimer(ui, opts)
933 timer, fm = gettimer(ui, opts)
928 if not manifest_rev:
934 if not manifest_rev:
929 ctx = scmutil.revsingle(repo, rev, rev)
935 ctx = scmutil.revsingle(repo, rev, rev)
930 t = ctx.manifestnode()
936 t = ctx.manifestnode()
931 else:
937 else:
932 from mercurial.node import bin
938 from mercurial.node import bin
933
939
934 if len(rev) == 40:
940 if len(rev) == 40:
935 t = bin(rev)
941 t = bin(rev)
936 else:
942 else:
937 try:
943 try:
938 rev = int(rev)
944 rev = int(rev)
939
945
940 if util.safehasattr(repo.manifestlog, b'getstorage'):
946 if util.safehasattr(repo.manifestlog, b'getstorage'):
941 t = repo.manifestlog.getstorage(b'').node(rev)
947 t = repo.manifestlog.getstorage(b'').node(rev)
942 else:
948 else:
943 t = repo.manifestlog._revlog.lookup(rev)
949 t = repo.manifestlog._revlog.lookup(rev)
944 except ValueError:
950 except ValueError:
945 raise error.Abort(b'manifest revision must be integer or full '
951 raise error.Abort(b'manifest revision must be integer or full '
946 b'node')
952 b'node')
947 def d():
953 def d():
948 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
954 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
949 repo.manifestlog[t].read()
955 repo.manifestlog[t].read()
950 timer(d)
956 timer(d)
951 fm.end()
957 fm.end()
952
958
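# Illustrative sketch (not part of upstream perf.py, hypothetical helper):
# resolving the REV|NODE argument the way perfmanifest does above. A
# 40-character value is taken as a hexadecimal manifest node, anything else
# must parse as an integer revision. Assumes a Mercurial version where
# manifestlog.getstorage() exists (the code above falls back to
# manifestlog._revlog otherwise).
def _sketchmanifestnode(repo, rev):
    from mercurial.node import bin
    if len(rev) == 40:
        return bin(rev)
    return repo.manifestlog.getstorage(b'').node(int(rev))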
953 @command(b'perfchangeset', formatteropts)
959 @command(b'perfchangeset', formatteropts)
954 def perfchangeset(ui, repo, rev, **opts):
960 def perfchangeset(ui, repo, rev, **opts):
955 opts = _byteskwargs(opts)
961 opts = _byteskwargs(opts)
956 timer, fm = gettimer(ui, opts)
962 timer, fm = gettimer(ui, opts)
957 n = scmutil.revsingle(repo, rev).node()
963 n = scmutil.revsingle(repo, rev).node()
958 def d():
964 def d():
959 repo.changelog.read(n)
965 repo.changelog.read(n)
960 #repo.changelog._cache = None
966 #repo.changelog._cache = None
961 timer(d)
967 timer(d)
962 fm.end()
968 fm.end()
963
969
964 @command(b'perfindex', formatteropts)
970 @command(b'perfindex', formatteropts)
965 def perfindex(ui, repo, **opts):
971 def perfindex(ui, repo, **opts):
966 import mercurial.revlog
972 import mercurial.revlog
967 opts = _byteskwargs(opts)
973 opts = _byteskwargs(opts)
968 timer, fm = gettimer(ui, opts)
974 timer, fm = gettimer(ui, opts)
969 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
975 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
970 n = repo[b"tip"].node()
976 n = repo[b"tip"].node()
971 svfs = getsvfs(repo)
977 svfs = getsvfs(repo)
972 def d():
978 def d():
973 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
979 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
974 cl.rev(n)
980 cl.rev(n)
975 timer(d)
981 timer(d)
976 fm.end()
982 fm.end()
977
983
978 @command(b'perfstartup', formatteropts)
984 @command(b'perfstartup', formatteropts)
979 def perfstartup(ui, repo, **opts):
985 def perfstartup(ui, repo, **opts):
980 opts = _byteskwargs(opts)
986 opts = _byteskwargs(opts)
981 timer, fm = gettimer(ui, opts)
987 timer, fm = gettimer(ui, opts)
982 def d():
988 def d():
983 if os.name != r'nt':
989 if os.name != r'nt':
984 os.system(b"HGRCPATH= %s version -q > /dev/null" %
990 os.system(b"HGRCPATH= %s version -q > /dev/null" %
985 fsencode(sys.argv[0]))
991 fsencode(sys.argv[0]))
986 else:
992 else:
987 os.environ[r'HGRCPATH'] = r' '
993 os.environ[r'HGRCPATH'] = r' '
988 os.system(r"%s version -q > NUL" % sys.argv[0])
994 os.system(r"%s version -q > NUL" % sys.argv[0])
989 timer(d)
995 timer(d)
990 fm.end()
996 fm.end()
991
997
992 @command(b'perfparents', formatteropts)
998 @command(b'perfparents', formatteropts)
993 def perfparents(ui, repo, **opts):
999 def perfparents(ui, repo, **opts):
994 opts = _byteskwargs(opts)
1000 opts = _byteskwargs(opts)
995 timer, fm = gettimer(ui, opts)
1001 timer, fm = gettimer(ui, opts)
996 # control the number of commits perfparents iterates over
1002 # control the number of commits perfparents iterates over
997 # experimental config: perf.parentscount
1003 # experimental config: perf.parentscount
998 count = getint(ui, b"perf", b"parentscount", 1000)
1004 count = getint(ui, b"perf", b"parentscount", 1000)
999 if len(repo.changelog) < count:
1005 if len(repo.changelog) < count:
1000 raise error.Abort(b"repo needs %d commits for this test" % count)
1006 raise error.Abort(b"repo needs %d commits for this test" % count)
1001 repo = repo.unfiltered()
1007 repo = repo.unfiltered()
1002 nl = [repo.changelog.node(i) for i in _xrange(count)]
1008 nl = [repo.changelog.node(i) for i in _xrange(count)]
1003 def d():
1009 def d():
1004 for n in nl:
1010 for n in nl:
1005 repo.changelog.parents(n)
1011 repo.changelog.parents(n)
1006 timer(d)
1012 timer(d)
1007 fm.end()
1013 fm.end()
1008
1014
1009 @command(b'perfctxfiles', formatteropts)
1015 @command(b'perfctxfiles', formatteropts)
1010 def perfctxfiles(ui, repo, x, **opts):
1016 def perfctxfiles(ui, repo, x, **opts):
1011 opts = _byteskwargs(opts)
1017 opts = _byteskwargs(opts)
1012 x = int(x)
1018 x = int(x)
1013 timer, fm = gettimer(ui, opts)
1019 timer, fm = gettimer(ui, opts)
1014 def d():
1020 def d():
1015 len(repo[x].files())
1021 len(repo[x].files())
1016 timer(d)
1022 timer(d)
1017 fm.end()
1023 fm.end()
1018
1024
1019 @command(b'perfrawfiles', formatteropts)
1025 @command(b'perfrawfiles', formatteropts)
1020 def perfrawfiles(ui, repo, x, **opts):
1026 def perfrawfiles(ui, repo, x, **opts):
1021 opts = _byteskwargs(opts)
1027 opts = _byteskwargs(opts)
1022 x = int(x)
1028 x = int(x)
1023 timer, fm = gettimer(ui, opts)
1029 timer, fm = gettimer(ui, opts)
1024 cl = repo.changelog
1030 cl = repo.changelog
1025 def d():
1031 def d():
1026 len(cl.read(x)[3])
1032 len(cl.read(x)[3])
1027 timer(d)
1033 timer(d)
1028 fm.end()
1034 fm.end()
1029
1035
1030 @command(b'perflookup', formatteropts)
1036 @command(b'perflookup', formatteropts)
1031 def perflookup(ui, repo, rev, **opts):
1037 def perflookup(ui, repo, rev, **opts):
1032 opts = _byteskwargs(opts)
1038 opts = _byteskwargs(opts)
1033 timer, fm = gettimer(ui, opts)
1039 timer, fm = gettimer(ui, opts)
1034 timer(lambda: len(repo.lookup(rev)))
1040 timer(lambda: len(repo.lookup(rev)))
1035 fm.end()
1041 fm.end()
1036
1042
1037 @command(b'perflinelogedits',
1043 @command(b'perflinelogedits',
1038 [(b'n', b'edits', 10000, b'number of edits'),
1044 [(b'n', b'edits', 10000, b'number of edits'),
1039 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1045 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1040 ], norepo=True)
1046 ], norepo=True)
1041 def perflinelogedits(ui, **opts):
1047 def perflinelogedits(ui, **opts):
1042 from mercurial import linelog
1048 from mercurial import linelog
1043
1049
1044 opts = _byteskwargs(opts)
1050 opts = _byteskwargs(opts)
1045
1051
1046 edits = opts[b'edits']
1052 edits = opts[b'edits']
1047 maxhunklines = opts[b'max_hunk_lines']
1053 maxhunklines = opts[b'max_hunk_lines']
1048
1054
1049 maxb1 = 100000
1055 maxb1 = 100000
1050 random.seed(0)
1056 random.seed(0)
1051 randint = random.randint
1057 randint = random.randint
1052 currentlines = 0
1058 currentlines = 0
1053 arglist = []
1059 arglist = []
1054 for rev in _xrange(edits):
1060 for rev in _xrange(edits):
1055 a1 = randint(0, currentlines)
1061 a1 = randint(0, currentlines)
1056 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1062 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1057 b1 = randint(0, maxb1)
1063 b1 = randint(0, maxb1)
1058 b2 = randint(b1, b1 + maxhunklines)
1064 b2 = randint(b1, b1 + maxhunklines)
1059 currentlines += (b2 - b1) - (a2 - a1)
1065 currentlines += (b2 - b1) - (a2 - a1)
1060 arglist.append((rev, a1, a2, b1, b2))
1066 arglist.append((rev, a1, a2, b1, b2))
1061
1067
1062 def d():
1068 def d():
1063 ll = linelog.linelog()
1069 ll = linelog.linelog()
1064 for args in arglist:
1070 for args in arglist:
1065 ll.replacelines(*args)
1071 ll.replacelines(*args)
1066
1072
1067 timer, fm = gettimer(ui, opts)
1073 timer, fm = gettimer(ui, opts)
1068 timer(d)
1074 timer(d)
1069 fm.end()
1075 fm.end()
1070
1076
1071 @command(b'perfrevrange', formatteropts)
1077 @command(b'perfrevrange', formatteropts)
1072 def perfrevrange(ui, repo, *specs, **opts):
1078 def perfrevrange(ui, repo, *specs, **opts):
1073 opts = _byteskwargs(opts)
1079 opts = _byteskwargs(opts)
1074 timer, fm = gettimer(ui, opts)
1080 timer, fm = gettimer(ui, opts)
1075 revrange = scmutil.revrange
1081 revrange = scmutil.revrange
1076 timer(lambda: len(revrange(repo, specs)))
1082 timer(lambda: len(revrange(repo, specs)))
1077 fm.end()
1083 fm.end()
1078
1084
1079 @command(b'perfnodelookup', formatteropts)
1085 @command(b'perfnodelookup', formatteropts)
1080 def perfnodelookup(ui, repo, rev, **opts):
1086 def perfnodelookup(ui, repo, rev, **opts):
1081 opts = _byteskwargs(opts)
1087 opts = _byteskwargs(opts)
1082 timer, fm = gettimer(ui, opts)
1088 timer, fm = gettimer(ui, opts)
1083 import mercurial.revlog
1089 import mercurial.revlog
1084 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1090 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1085 n = scmutil.revsingle(repo, rev).node()
1091 n = scmutil.revsingle(repo, rev).node()
1086 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1092 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1087 def d():
1093 def d():
1088 cl.rev(n)
1094 cl.rev(n)
1089 clearcaches(cl)
1095 clearcaches(cl)
1090 timer(d)
1096 timer(d)
1091 fm.end()
1097 fm.end()
1092
1098
1093 @command(b'perflog',
1099 @command(b'perflog',
1094 [(b'', b'rename', False, b'ask log to follow renames')
1100 [(b'', b'rename', False, b'ask log to follow renames')
1095 ] + formatteropts)
1101 ] + formatteropts)
1096 def perflog(ui, repo, rev=None, **opts):
1102 def perflog(ui, repo, rev=None, **opts):
1097 opts = _byteskwargs(opts)
1103 opts = _byteskwargs(opts)
1098 if rev is None:
1104 if rev is None:
1099 rev=[]
1105 rev=[]
1100 timer, fm = gettimer(ui, opts)
1106 timer, fm = gettimer(ui, opts)
1101 ui.pushbuffer()
1107 ui.pushbuffer()
1102 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1108 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1103 copies=opts.get(b'rename')))
1109 copies=opts.get(b'rename')))
1104 ui.popbuffer()
1110 ui.popbuffer()
1105 fm.end()
1111 fm.end()
1106
1112
1107 @command(b'perfmoonwalk', formatteropts)
1113 @command(b'perfmoonwalk', formatteropts)
1108 def perfmoonwalk(ui, repo, **opts):
1114 def perfmoonwalk(ui, repo, **opts):
1109 """benchmark walking the changelog backwards
1115 """benchmark walking the changelog backwards
1110
1116
1111 This also loads the changelog data for each revision in the changelog.
1117 This also loads the changelog data for each revision in the changelog.
1112 """
1118 """
1113 opts = _byteskwargs(opts)
1119 opts = _byteskwargs(opts)
1114 timer, fm = gettimer(ui, opts)
1120 timer, fm = gettimer(ui, opts)
1115 def moonwalk():
1121 def moonwalk():
1116 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1122 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1117 ctx = repo[i]
1123 ctx = repo[i]
1118 ctx.branch() # read changelog data (in addition to the index)
1124 ctx.branch() # read changelog data (in addition to the index)
1119 timer(moonwalk)
1125 timer(moonwalk)
1120 fm.end()
1126 fm.end()
1121
1127
1122 @command(b'perftemplating',
1128 @command(b'perftemplating',
1123 [(b'r', b'rev', [], b'revisions to run the template on'),
1129 [(b'r', b'rev', [], b'revisions to run the template on'),
1124 ] + formatteropts)
1130 ] + formatteropts)
1125 def perftemplating(ui, repo, testedtemplate=None, **opts):
1131 def perftemplating(ui, repo, testedtemplate=None, **opts):
1126 """test the rendering time of a given template"""
1132 """test the rendering time of a given template"""
1127 if makelogtemplater is None:
1133 if makelogtemplater is None:
1128 raise error.Abort((b"perftemplating not available with this Mercurial"),
1134 raise error.Abort((b"perftemplating not available with this Mercurial"),
1129 hint=b"use 4.3 or later")
1135 hint=b"use 4.3 or later")
1130
1136
1131 opts = _byteskwargs(opts)
1137 opts = _byteskwargs(opts)
1132
1138
1133 nullui = ui.copy()
1139 nullui = ui.copy()
1134 nullui.fout = open(os.devnull, r'wb')
1140 nullui.fout = open(os.devnull, r'wb')
1135 nullui.disablepager()
1141 nullui.disablepager()
1136 revs = opts.get(b'rev')
1142 revs = opts.get(b'rev')
1137 if not revs:
1143 if not revs:
1138 revs = [b'all()']
1144 revs = [b'all()']
1139 revs = list(scmutil.revrange(repo, revs))
1145 revs = list(scmutil.revrange(repo, revs))
1140
1146
1141 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1147 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1142 b' {author|person}: {desc|firstline}\n')
1148 b' {author|person}: {desc|firstline}\n')
1143 if testedtemplate is None:
1149 if testedtemplate is None:
1144 testedtemplate = defaulttemplate
1150 testedtemplate = defaulttemplate
1145 displayer = makelogtemplater(nullui, repo, testedtemplate)
1151 displayer = makelogtemplater(nullui, repo, testedtemplate)
1146 def format():
1152 def format():
1147 for r in revs:
1153 for r in revs:
1148 ctx = repo[r]
1154 ctx = repo[r]
1149 displayer.show(ctx)
1155 displayer.show(ctx)
1150 displayer.flush(ctx)
1156 displayer.flush(ctx)
1151
1157
1152 timer, fm = gettimer(ui, opts)
1158 timer, fm = gettimer(ui, opts)
1153 timer(format)
1159 timer(format)
1154 fm.end()
1160 fm.end()
1155
1161
1156 @command(b'perfhelper-tracecopies', formatteropts +
1162 @command(b'perfhelper-tracecopies', formatteropts +
1157 [
1163 [
1158 (b'r', b'revs', [], b'restrict search to these revisions'),
1164 (b'r', b'revs', [], b'restrict search to these revisions'),
1159 ])
1165 ])
1160 def perfhelpertracecopies(ui, repo, revs=[], **opts):
1166 def perfhelpertracecopies(ui, repo, revs=[], **opts):
1161 """find statistic about potential parameters for the `perftracecopies`
1167 """find statistic about potential parameters for the `perftracecopies`
1162
1168
1163 This command find source-destination pair relevant for copytracing testing.
1169 This command find source-destination pair relevant for copytracing testing.
1164 It report value for some of the parameters that impact copy tracing time.
1170 It report value for some of the parameters that impact copy tracing time.
1165 """
1171 """
1166 opts = _byteskwargs(opts)
1172 opts = _byteskwargs(opts)
1167 fm = ui.formatter(b'perf', opts)
1173 fm = ui.formatter(b'perf', opts)
1168 header = '%12s %12s %12s %12s\n'
1174 header = '%12s %12s %12s %12s\n'
1169 output = ("%(source)12s %(destination)12s "
1175 output = ("%(source)12s %(destination)12s "
1170 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1176 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1171 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1177 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1172
1178
1173 if not revs:
1179 if not revs:
1174 revs = ['all()']
1180 revs = ['all()']
1175 revs = scmutil.revrange(repo, revs)
1181 revs = scmutil.revrange(repo, revs)
1176
1182
1177 roi = repo.revs('merge() and %ld', revs)
1183 roi = repo.revs('merge() and %ld', revs)
1178 for r in roi:
1184 for r in roi:
1179 ctx = repo[r]
1185 ctx = repo[r]
1180 p1 = ctx.p1().rev()
1186 p1 = ctx.p1().rev()
1181 p2 = ctx.p2().rev()
1187 p2 = ctx.p2().rev()
1182 bases = repo.changelog._commonancestorsheads(p1, p2)
1188 bases = repo.changelog._commonancestorsheads(p1, p2)
1183 for p in (p1, p2):
1189 for p in (p1, p2):
1184 for b in bases:
1190 for b in bases:
1185 base = repo[b]
1191 base = repo[b]
1186 parent = repo[p]
1192 parent = repo[p]
1187 missing = copies._computeforwardmissing(base, parent)
1193 missing = copies._computeforwardmissing(base, parent)
1188 if not missing:
1194 if not missing:
1189 continue
1195 continue
1190 fm.startitem()
1196 fm.startitem()
1191 data = {
1197 data = {
1192 b'source': base.hex(),
1198 b'source': base.hex(),
1193 b'destination': parent.hex(),
1199 b'destination': parent.hex(),
1194 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1200 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1195 b'nbmissingfiles': len(missing),
1201 b'nbmissingfiles': len(missing),
1196 }
1202 }
1197 fm.data(**data)
1203 fm.data(**data)
1198 out = data.copy()
1204 out = data.copy()
1199 out['source'] = fm.hexfunc(base.node())
1205 out['source'] = fm.hexfunc(base.node())
1200 out['destination'] = fm.hexfunc(parent.node())
1206 out['destination'] = fm.hexfunc(parent.node())
1201 fm.plain(output % out)
1207 fm.plain(output % out)
1202 fm.end()
1208 fm.end()
1203
1209
1204 @command(b'perfcca', formatteropts)
1210 @command(b'perfcca', formatteropts)
1205 def perfcca(ui, repo, **opts):
1211 def perfcca(ui, repo, **opts):
1206 opts = _byteskwargs(opts)
1212 opts = _byteskwargs(opts)
1207 timer, fm = gettimer(ui, opts)
1213 timer, fm = gettimer(ui, opts)
1208 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1214 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1209 fm.end()
1215 fm.end()
1210
1216
1211 @command(b'perffncacheload', formatteropts)
1217 @command(b'perffncacheload', formatteropts)
1212 def perffncacheload(ui, repo, **opts):
1218 def perffncacheload(ui, repo, **opts):
1213 opts = _byteskwargs(opts)
1219 opts = _byteskwargs(opts)
1214 timer, fm = gettimer(ui, opts)
1220 timer, fm = gettimer(ui, opts)
1215 s = repo.store
1221 s = repo.store
1216 def d():
1222 def d():
1217 s.fncache._load()
1223 s.fncache._load()
1218 timer(d)
1224 timer(d)
1219 fm.end()
1225 fm.end()
1220
1226
1221 @command(b'perffncachewrite', formatteropts)
1227 @command(b'perffncachewrite', formatteropts)
1222 def perffncachewrite(ui, repo, **opts):
1228 def perffncachewrite(ui, repo, **opts):
1223 opts = _byteskwargs(opts)
1229 opts = _byteskwargs(opts)
1224 timer, fm = gettimer(ui, opts)
1230 timer, fm = gettimer(ui, opts)
1225 s = repo.store
1231 s = repo.store
1226 lock = repo.lock()
1232 lock = repo.lock()
1227 s.fncache._load()
1233 s.fncache._load()
1228 tr = repo.transaction(b'perffncachewrite')
1234 tr = repo.transaction(b'perffncachewrite')
1229 tr.addbackup(b'fncache')
1235 tr.addbackup(b'fncache')
1230 def d():
1236 def d():
1231 s.fncache._dirty = True
1237 s.fncache._dirty = True
1232 s.fncache.write(tr)
1238 s.fncache.write(tr)
1233 timer(d)
1239 timer(d)
1234 tr.close()
1240 tr.close()
1235 lock.release()
1241 lock.release()
1236 fm.end()
1242 fm.end()
1237
1243
1238 @command(b'perffncacheencode', formatteropts)
1244 @command(b'perffncacheencode', formatteropts)
1239 def perffncacheencode(ui, repo, **opts):
1245 def perffncacheencode(ui, repo, **opts):
1240 opts = _byteskwargs(opts)
1246 opts = _byteskwargs(opts)
1241 timer, fm = gettimer(ui, opts)
1247 timer, fm = gettimer(ui, opts)
1242 s = repo.store
1248 s = repo.store
1243 s.fncache._load()
1249 s.fncache._load()
1244 def d():
1250 def d():
1245 for p in s.fncache.entries:
1251 for p in s.fncache.entries:
1246 s.encode(p)
1252 s.encode(p)
1247 timer(d)
1253 timer(d)
1248 fm.end()
1254 fm.end()
1249
1255
1250 def _bdiffworker(q, blocks, xdiff, ready, done):
1256 def _bdiffworker(q, blocks, xdiff, ready, done):
1251 while not done.is_set():
1257 while not done.is_set():
1252 pair = q.get()
1258 pair = q.get()
1253 while pair is not None:
1259 while pair is not None:
1254 if xdiff:
1260 if xdiff:
1255 mdiff.bdiff.xdiffblocks(*pair)
1261 mdiff.bdiff.xdiffblocks(*pair)
1256 elif blocks:
1262 elif blocks:
1257 mdiff.bdiff.blocks(*pair)
1263 mdiff.bdiff.blocks(*pair)
1258 else:
1264 else:
1259 mdiff.textdiff(*pair)
1265 mdiff.textdiff(*pair)
1260 q.task_done()
1266 q.task_done()
1261 pair = q.get()
1267 pair = q.get()
1262 q.task_done() # for the None one
1268 q.task_done() # for the None one
1263 with ready:
1269 with ready:
1264 ready.wait()
1270 ready.wait()
1265
1271
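# Illustrative sketch (not part of upstream perf.py, hypothetical helper): a
# simplified, one-shot version of the worker pattern used by _bdiffworker
# above. Worker threads drain a Queue and a None sentinel tells each of them to
# stop; the perf code adds a Condition and an Event on top of this so the same
# pool can be reused across repeated timing runs.
def _sketchworkerpool(items, work, threads=2):
    import threading
    try:
        from queue import Queue  # Python 3
    except ImportError:
        from Queue import Queue  # Python 2
    q = Queue()

    def worker():
        item = q.get()
        while item is not None:
            work(item)
            q.task_done()
            item = q.get()
        q.task_done()  # account for the None sentinel

    for _ in range(threads):
        threading.Thread(target=worker).start()
    for item in items:
        q.put(item)
    for _ in range(threads):
        q.put(None)   # one sentinel per worker
    q.join()          # returns once every item and sentinel is processed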
1266 def _manifestrevision(repo, mnode):
1272 def _manifestrevision(repo, mnode):
1267 ml = repo.manifestlog
1273 ml = repo.manifestlog
1268
1274
1269 if util.safehasattr(ml, b'getstorage'):
1275 if util.safehasattr(ml, b'getstorage'):
1270 store = ml.getstorage(b'')
1276 store = ml.getstorage(b'')
1271 else:
1277 else:
1272 store = ml._revlog
1278 store = ml._revlog
1273
1279
1274 return store.revision(mnode)
1280 return store.revision(mnode)
1275
1281
1276 @command(b'perfbdiff', revlogopts + formatteropts + [
1282 @command(b'perfbdiff', revlogopts + formatteropts + [
1277 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1283 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1278 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1284 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1279 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1285 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1280 (b'', b'blocks', False, b'test computing diffs into blocks'),
1286 (b'', b'blocks', False, b'test computing diffs into blocks'),
1281 (b'', b'xdiff', False, b'use xdiff algorithm'),
1287 (b'', b'xdiff', False, b'use xdiff algorithm'),
1282 ],
1288 ],
1283
1289
1284 b'-c|-m|FILE REV')
1290 b'-c|-m|FILE REV')
1285 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1291 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1286 """benchmark a bdiff between revisions
1292 """benchmark a bdiff between revisions
1287
1293
1288 By default, benchmark a bdiff between its delta parent and itself.
1294 By default, benchmark a bdiff between its delta parent and itself.
1289
1295
1290 With ``--count``, benchmark bdiffs between delta parents and self for N
1296 With ``--count``, benchmark bdiffs between delta parents and self for N
1291 revisions starting at the specified revision.
1297 revisions starting at the specified revision.
1292
1298
1293 With ``--alldata``, assume the requested revision is a changeset and
1299 With ``--alldata``, assume the requested revision is a changeset and
1294 measure bdiffs for all changes related to that changeset (manifest
1300 measure bdiffs for all changes related to that changeset (manifest
1295 and filelogs).
1301 and filelogs).
1296 """
1302 """
1297 opts = _byteskwargs(opts)
1303 opts = _byteskwargs(opts)
1298
1304
1299 if opts[b'xdiff'] and not opts[b'blocks']:
1305 if opts[b'xdiff'] and not opts[b'blocks']:
1300 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1306 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1301
1307
1302 if opts[b'alldata']:
1308 if opts[b'alldata']:
1303 opts[b'changelog'] = True
1309 opts[b'changelog'] = True
1304
1310
1305 if opts.get(b'changelog') or opts.get(b'manifest'):
1311 if opts.get(b'changelog') or opts.get(b'manifest'):
1306 file_, rev = None, file_
1312 file_, rev = None, file_
1307 elif rev is None:
1313 elif rev is None:
1308 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1314 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1309
1315
1310 blocks = opts[b'blocks']
1316 blocks = opts[b'blocks']
1311 xdiff = opts[b'xdiff']
1317 xdiff = opts[b'xdiff']
1312 textpairs = []
1318 textpairs = []
1313
1319
1314 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1320 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1315
1321
1316 startrev = r.rev(r.lookup(rev))
1322 startrev = r.rev(r.lookup(rev))
1317 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1323 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1318 if opts[b'alldata']:
1324 if opts[b'alldata']:
1319 # Load revisions associated with changeset.
1325 # Load revisions associated with changeset.
1320 ctx = repo[rev]
1326 ctx = repo[rev]
1321 mtext = _manifestrevision(repo, ctx.manifestnode())
1327 mtext = _manifestrevision(repo, ctx.manifestnode())
1322 for pctx in ctx.parents():
1328 for pctx in ctx.parents():
1323 pman = _manifestrevision(repo, pctx.manifestnode())
1329 pman = _manifestrevision(repo, pctx.manifestnode())
1324 textpairs.append((pman, mtext))
1330 textpairs.append((pman, mtext))
1325
1331
1326 # Load filelog revisions by iterating manifest delta.
1332 # Load filelog revisions by iterating manifest delta.
1327 man = ctx.manifest()
1333 man = ctx.manifest()
1328 pman = ctx.p1().manifest()
1334 pman = ctx.p1().manifest()
1329 for filename, change in pman.diff(man).items():
1335 for filename, change in pman.diff(man).items():
1330 fctx = repo.file(filename)
1336 fctx = repo.file(filename)
1331 f1 = fctx.revision(change[0][0] or -1)
1337 f1 = fctx.revision(change[0][0] or -1)
1332 f2 = fctx.revision(change[1][0] or -1)
1338 f2 = fctx.revision(change[1][0] or -1)
1333 textpairs.append((f1, f2))
1339 textpairs.append((f1, f2))
1334 else:
1340 else:
1335 dp = r.deltaparent(rev)
1341 dp = r.deltaparent(rev)
1336 textpairs.append((r.revision(dp), r.revision(rev)))
1342 textpairs.append((r.revision(dp), r.revision(rev)))
1337
1343
1338 withthreads = threads > 0
1344 withthreads = threads > 0
1339 if not withthreads:
1345 if not withthreads:
1340 def d():
1346 def d():
1341 for pair in textpairs:
1347 for pair in textpairs:
1342 if xdiff:
1348 if xdiff:
1343 mdiff.bdiff.xdiffblocks(*pair)
1349 mdiff.bdiff.xdiffblocks(*pair)
1344 elif blocks:
1350 elif blocks:
1345 mdiff.bdiff.blocks(*pair)
1351 mdiff.bdiff.blocks(*pair)
1346 else:
1352 else:
1347 mdiff.textdiff(*pair)
1353 mdiff.textdiff(*pair)
1348 else:
1354 else:
1349 q = queue()
1355 q = queue()
1350 for i in _xrange(threads):
1356 for i in _xrange(threads):
1351 q.put(None)
1357 q.put(None)
1352 ready = threading.Condition()
1358 ready = threading.Condition()
1353 done = threading.Event()
1359 done = threading.Event()
1354 for i in _xrange(threads):
1360 for i in _xrange(threads):
1355 threading.Thread(target=_bdiffworker,
1361 threading.Thread(target=_bdiffworker,
1356 args=(q, blocks, xdiff, ready, done)).start()
1362 args=(q, blocks, xdiff, ready, done)).start()
1357 q.join()
1363 q.join()
1358 def d():
1364 def d():
1359 for pair in textpairs:
1365 for pair in textpairs:
1360 q.put(pair)
1366 q.put(pair)
1361 for i in _xrange(threads):
1367 for i in _xrange(threads):
1362 q.put(None)
1368 q.put(None)
1363 with ready:
1369 with ready:
1364 ready.notify_all()
1370 ready.notify_all()
1365 q.join()
1371 q.join()
1366 timer, fm = gettimer(ui, opts)
1372 timer, fm = gettimer(ui, opts)
1367 timer(d)
1373 timer(d)
1368 fm.end()
1374 fm.end()
1369
1375
1370 if withthreads:
1376 if withthreads:
1371 done.set()
1377 done.set()
1372 for i in _xrange(threads):
1378 for i in _xrange(threads):
1373 q.put(None)
1379 q.put(None)
1374 with ready:
1380 with ready:
1375 ready.notify_all()
1381 ready.notify_all()
1376
1382
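# Illustrative sketch (not part of upstream perf.py, hypothetical helper):
# timing a single bdiff outside the perf harness, using the same mdiff.textdiff
# call exercised above. Assumes `repo` is a repository object and `rev` a
# changelog revision; the delta parent is resolved the same way perfbdiff does.
def _sketchtimeonebdiff(repo, rev):
    import time
    from mercurial import mdiff
    cl = repo.changelog
    old = cl.revision(cl.deltaparent(rev))
    new = cl.revision(rev)
    start = time.time()
    mdiff.textdiff(old, new)
    return time.time() - start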
1377 @command(b'perfunidiff', revlogopts + formatteropts + [
1383 @command(b'perfunidiff', revlogopts + formatteropts + [
1378 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1384 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1379 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1385 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1380 ], b'-c|-m|FILE REV')
1386 ], b'-c|-m|FILE REV')
1381 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1387 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1382 """benchmark a unified diff between revisions
1388 """benchmark a unified diff between revisions
1383
1389
1384 This doesn't include any copy tracing - it's just a unified diff
1390 This doesn't include any copy tracing - it's just a unified diff
1385 of the texts.
1391 of the texts.
1386
1392
1387 By default, benchmark a diff between its delta parent and itself.
1393 By default, benchmark a diff between its delta parent and itself.
1388
1394
1389 With ``--count``, benchmark diffs between delta parents and self for N
1395 With ``--count``, benchmark diffs between delta parents and self for N
1390 revisions starting at the specified revision.
1396 revisions starting at the specified revision.
1391
1397
1392 With ``--alldata``, assume the requested revision is a changeset and
1398 With ``--alldata``, assume the requested revision is a changeset and
1393 measure diffs for all changes related to that changeset (manifest
1399 measure diffs for all changes related to that changeset (manifest
1394 and filelogs).
1400 and filelogs).
1395 """
1401 """
1396 opts = _byteskwargs(opts)
1402 opts = _byteskwargs(opts)
1397 if opts[b'alldata']:
1403 if opts[b'alldata']:
1398 opts[b'changelog'] = True
1404 opts[b'changelog'] = True
1399
1405
1400 if opts.get(b'changelog') or opts.get(b'manifest'):
1406 if opts.get(b'changelog') or opts.get(b'manifest'):
1401 file_, rev = None, file_
1407 file_, rev = None, file_
1402 elif rev is None:
1408 elif rev is None:
1403 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1409 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1404
1410
1405 textpairs = []
1411 textpairs = []
1406
1412
1407 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1413 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1408
1414
1409 startrev = r.rev(r.lookup(rev))
1415 startrev = r.rev(r.lookup(rev))
1410 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1416 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1411 if opts[b'alldata']:
1417 if opts[b'alldata']:
1412 # Load revisions associated with changeset.
1418 # Load revisions associated with changeset.
1413 ctx = repo[rev]
1419 ctx = repo[rev]
1414 mtext = _manifestrevision(repo, ctx.manifestnode())
1420 mtext = _manifestrevision(repo, ctx.manifestnode())
1415 for pctx in ctx.parents():
1421 for pctx in ctx.parents():
1416 pman = _manifestrevision(repo, pctx.manifestnode())
1422 pman = _manifestrevision(repo, pctx.manifestnode())
1417 textpairs.append((pman, mtext))
1423 textpairs.append((pman, mtext))
1418
1424
1419 # Load filelog revisions by iterating manifest delta.
1425 # Load filelog revisions by iterating manifest delta.
1420 man = ctx.manifest()
1426 man = ctx.manifest()
1421 pman = ctx.p1().manifest()
1427 pman = ctx.p1().manifest()
1422 for filename, change in pman.diff(man).items():
1428 for filename, change in pman.diff(man).items():
1423 fctx = repo.file(filename)
1429 fctx = repo.file(filename)
1424 f1 = fctx.revision(change[0][0] or -1)
1430 f1 = fctx.revision(change[0][0] or -1)
1425 f2 = fctx.revision(change[1][0] or -1)
1431 f2 = fctx.revision(change[1][0] or -1)
1426 textpairs.append((f1, f2))
1432 textpairs.append((f1, f2))
1427 else:
1433 else:
1428 dp = r.deltaparent(rev)
1434 dp = r.deltaparent(rev)
1429 textpairs.append((r.revision(dp), r.revision(rev)))
1435 textpairs.append((r.revision(dp), r.revision(rev)))
1430
1436
1431 def d():
1437 def d():
1432 for left, right in textpairs:
1438 for left, right in textpairs:
1433 # The date strings don't matter, so we pass empty strings.
1439 # The date strings don't matter, so we pass empty strings.
1434 headerlines, hunks = mdiff.unidiff(
1440 headerlines, hunks = mdiff.unidiff(
1435 left, b'', right, b'', b'left', b'right', binary=False)
1441 left, b'', right, b'', b'left', b'right', binary=False)
1436 # consume iterators in roughly the way patch.py does
1442 # consume iterators in roughly the way patch.py does
1437 b'\n'.join(headerlines)
1443 b'\n'.join(headerlines)
1438 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1444 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1439 timer, fm = gettimer(ui, opts)
1445 timer, fm = gettimer(ui, opts)
1440 timer(d)
1446 timer(d)
1441 fm.end()
1447 fm.end()
1442
1448
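# Illustrative sketch (not part of upstream perf.py, hypothetical helper):
# producing and consuming a unified diff of two byte strings with the same
# mdiff.unidiff call used above. The header lines and hunks come back as
# iterables and must be drained for the work to actually happen.
def _sketchunidiff(left, right):
    from mercurial import mdiff
    headerlines, hunks = mdiff.unidiff(
        left, b'', right, b'', b'left', b'right', binary=False)
    header = b'\n'.join(headerlines)
    body = b''.join(l for hrange, hlines in hunks for l in hlines)
    return header, body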
1443 @command(b'perfdiffwd', formatteropts)
1449 @command(b'perfdiffwd', formatteropts)
1444 def perfdiffwd(ui, repo, **opts):
1450 def perfdiffwd(ui, repo, **opts):
1445 """Profile diff of working directory changes"""
1451 """Profile diff of working directory changes"""
1446 opts = _byteskwargs(opts)
1452 opts = _byteskwargs(opts)
1447 timer, fm = gettimer(ui, opts)
1453 timer, fm = gettimer(ui, opts)
1448 options = {
1454 options = {
1449 'w': 'ignore_all_space',
1455 'w': 'ignore_all_space',
1450 'b': 'ignore_space_change',
1456 'b': 'ignore_space_change',
1451 'B': 'ignore_blank_lines',
1457 'B': 'ignore_blank_lines',
1452 }
1458 }
1453
1459
1454 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1460 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1455 opts = dict((options[c], b'1') for c in diffopt)
1461 opts = dict((options[c], b'1') for c in diffopt)
1456 def d():
1462 def d():
1457 ui.pushbuffer()
1463 ui.pushbuffer()
1458 commands.diff(ui, repo, **opts)
1464 commands.diff(ui, repo, **opts)
1459 ui.popbuffer()
1465 ui.popbuffer()
1460 diffopt = diffopt.encode('ascii')
1466 diffopt = diffopt.encode('ascii')
1461 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1467 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1462 timer(d, title=title)
1468 timer(d, title=title)
1463 fm.end()
1469 fm.end()
1464
1470
1465 @command(b'perfrevlogindex', revlogopts + formatteropts,
1471 @command(b'perfrevlogindex', revlogopts + formatteropts,
1466 b'-c|-m|FILE')
1472 b'-c|-m|FILE')
1467 def perfrevlogindex(ui, repo, file_=None, **opts):
1473 def perfrevlogindex(ui, repo, file_=None, **opts):
1468 """Benchmark operations against a revlog index.
1474 """Benchmark operations against a revlog index.
1469
1475
1470 This tests constructing a revlog instance, reading index data,
1476 This tests constructing a revlog instance, reading index data,
1471 parsing index data, and performing various operations related to
1477 parsing index data, and performing various operations related to
1472 index data.
1478 index data.
1473 """
1479 """
1474
1480
1475 opts = _byteskwargs(opts)
1481 opts = _byteskwargs(opts)
1476
1482
1477 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1483 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1478
1484
1479 opener = getattr(rl, 'opener') # trick linter
1485 opener = getattr(rl, 'opener') # trick linter
1480 indexfile = rl.indexfile
1486 indexfile = rl.indexfile
1481 data = opener.read(indexfile)
1487 data = opener.read(indexfile)
1482
1488
1483 header = struct.unpack(b'>I', data[0:4])[0]
1489 header = struct.unpack(b'>I', data[0:4])[0]
1484 version = header & 0xFFFF
1490 version = header & 0xFFFF
1485 if version == 1:
1491 if version == 1:
1486 revlogio = revlog.revlogio()
1492 revlogio = revlog.revlogio()
1487 inline = header & (1 << 16)
1493 inline = header & (1 << 16)
1488 else:
1494 else:
1489 raise error.Abort((b'unsupported revlog version: %d') % version)
1495 raise error.Abort((b'unsupported revlog version: %d') % version)
1490
1496
1491 rllen = len(rl)
1497 rllen = len(rl)
1492
1498
1493 node0 = rl.node(0)
1499 node0 = rl.node(0)
1494 node25 = rl.node(rllen // 4)
1500 node25 = rl.node(rllen // 4)
1495 node50 = rl.node(rllen // 2)
1501 node50 = rl.node(rllen // 2)
1496 node75 = rl.node(rllen // 4 * 3)
1502 node75 = rl.node(rllen // 4 * 3)
1497 node100 = rl.node(rllen - 1)
1503 node100 = rl.node(rllen - 1)
1498
1504
1499 allrevs = range(rllen)
1505 allrevs = range(rllen)
1500 allrevsrev = list(reversed(allrevs))
1506 allrevsrev = list(reversed(allrevs))
1501 allnodes = [rl.node(rev) for rev in range(rllen)]
1507 allnodes = [rl.node(rev) for rev in range(rllen)]
1502 allnodesrev = list(reversed(allnodes))
1508 allnodesrev = list(reversed(allnodes))
1503
1509
1504 def constructor():
1510 def constructor():
1505 revlog.revlog(opener, indexfile)
1511 revlog.revlog(opener, indexfile)
1506
1512
1507 def read():
1513 def read():
1508 with opener(indexfile) as fh:
1514 with opener(indexfile) as fh:
1509 fh.read()
1515 fh.read()
1510
1516
1511 def parseindex():
1517 def parseindex():
1512 revlogio.parseindex(data, inline)
1518 revlogio.parseindex(data, inline)
1513
1519
1514 def getentry(revornode):
1520 def getentry(revornode):
1515 index = revlogio.parseindex(data, inline)[0]
1521 index = revlogio.parseindex(data, inline)[0]
1516 index[revornode]
1522 index[revornode]
1517
1523
1518 def getentries(revs, count=1):
1524 def getentries(revs, count=1):
1519 index = revlogio.parseindex(data, inline)[0]
1525 index = revlogio.parseindex(data, inline)[0]
1520
1526
1521 for i in range(count):
1527 for i in range(count):
1522 for rev in revs:
1528 for rev in revs:
1523 index[rev]
1529 index[rev]
1524
1530
1525 def resolvenode(node):
1531 def resolvenode(node):
1526 nodemap = revlogio.parseindex(data, inline)[1]
1532 nodemap = revlogio.parseindex(data, inline)[1]
1527 # This only works for the C code.
1533 # This only works for the C code.
1528 if nodemap is None:
1534 if nodemap is None:
1529 return
1535 return
1530
1536
1531 try:
1537 try:
1532 nodemap[node]
1538 nodemap[node]
1533 except error.RevlogError:
1539 except error.RevlogError:
1534 pass
1540 pass
1535
1541
1536 def resolvenodes(nodes, count=1):
1542 def resolvenodes(nodes, count=1):
1537 nodemap = revlogio.parseindex(data, inline)[1]
1543 nodemap = revlogio.parseindex(data, inline)[1]
1538 if nodemap is None:
1544 if nodemap is None:
1539 return
1545 return
1540
1546
1541 for i in range(count):
1547 for i in range(count):
1542 for node in nodes:
1548 for node in nodes:
1543 try:
1549 try:
1544 nodemap[node]
1550 nodemap[node]
1545 except error.RevlogError:
1551 except error.RevlogError:
1546 pass
1552 pass
1547
1553
1548 benches = [
1554 benches = [
1549 (constructor, b'revlog constructor'),
1555 (constructor, b'revlog constructor'),
1550 (read, b'read'),
1556 (read, b'read'),
1551 (parseindex, b'create index object'),
1557 (parseindex, b'create index object'),
1552 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1558 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1553 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1559 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1554 (lambda: resolvenode(node0), b'look up node at rev 0'),
1560 (lambda: resolvenode(node0), b'look up node at rev 0'),
1555 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1561 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1556 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1562 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1557 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1563 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1558 (lambda: resolvenode(node100), b'look up node at tip'),
1564 (lambda: resolvenode(node100), b'look up node at tip'),
1559 # 2x variation is to measure caching impact.
1565 # 2x variation is to measure caching impact.
1560 (lambda: resolvenodes(allnodes),
1566 (lambda: resolvenodes(allnodes),
1561 b'look up all nodes (forward)'),
1567 b'look up all nodes (forward)'),
1562 (lambda: resolvenodes(allnodes, 2),
1568 (lambda: resolvenodes(allnodes, 2),
1563 b'look up all nodes 2x (forward)'),
1569 b'look up all nodes 2x (forward)'),
1564 (lambda: resolvenodes(allnodesrev),
1570 (lambda: resolvenodes(allnodesrev),
1565 b'look up all nodes (reverse)'),
1571 b'look up all nodes (reverse)'),
1566 (lambda: resolvenodes(allnodesrev, 2),
1572 (lambda: resolvenodes(allnodesrev, 2),
1567 b'look up all nodes 2x (reverse)'),
1573 b'look up all nodes 2x (reverse)'),
1568 (lambda: getentries(allrevs),
1574 (lambda: getentries(allrevs),
1569 b'retrieve all index entries (forward)'),
1575 b'retrieve all index entries (forward)'),
1570 (lambda: getentries(allrevs, 2),
1576 (lambda: getentries(allrevs, 2),
1571 b'retrieve all index entries 2x (forward)'),
1577 b'retrieve all index entries 2x (forward)'),
1572 (lambda: getentries(allrevsrev),
1578 (lambda: getentries(allrevsrev),
1573 b'retrieve all index entries (reverse)'),
1579 b'retrieve all index entries (reverse)'),
1574 (lambda: getentries(allrevsrev, 2),
1580 (lambda: getentries(allrevsrev, 2),
1575 b'retrieve all index entries 2x (reverse)'),
1581 b'retrieve all index entries 2x (reverse)'),
1576 ]
1582 ]
1577
1583
1578 for fn, title in benches:
1584 for fn, title in benches:
1579 timer, fm = gettimer(ui, opts)
1585 timer, fm = gettimer(ui, opts)
1580 timer(fn, title=title)
1586 timer(fn, title=title)
1581 fm.end()
1587 fm.end()
1582
1588
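# Illustrative sketch (not part of upstream perf.py, hypothetical helper):
# decoding the revlog index header the way perfrevlogindex does above. The
# first four bytes hold the format version in the low 16 bits and feature flags
# (such as the inline flag) in the higher bits.
def _sketchindexheader(data):
    import struct
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    inline = bool(header & (1 << 16))
    return version, inline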
1583 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1589 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1584 [(b'd', b'dist', 100, b'distance between the revisions'),
1590 [(b'd', b'dist', 100, b'distance between the revisions'),
1585 (b's', b'startrev', 0, b'revision to start reading at'),
1591 (b's', b'startrev', 0, b'revision to start reading at'),
1586 (b'', b'reverse', False, b'read in reverse')],
1592 (b'', b'reverse', False, b'read in reverse')],
1587 b'-c|-m|FILE')
1593 b'-c|-m|FILE')
1588 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1594 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1589 **opts):
1595 **opts):
1590 """Benchmark reading a series of revisions from a revlog.
1596 """Benchmark reading a series of revisions from a revlog.
1591
1597
1592 By default, we read every ``-d/--dist`` revision from 0 to tip of
1598 By default, we read every ``-d/--dist`` revision from 0 to tip of
1593 the specified revlog.
1599 the specified revlog.
1594
1600
1595 The start revision can be defined via ``-s/--startrev``.
1601 The start revision can be defined via ``-s/--startrev``.
1596 """
1602 """
1597 opts = _byteskwargs(opts)
1603 opts = _byteskwargs(opts)
1598
1604
1599 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1605 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1600 rllen = getlen(ui)(rl)
1606 rllen = getlen(ui)(rl)
1601
1607
1602 if startrev < 0:
1608 if startrev < 0:
1603 startrev = rllen + startrev
1609 startrev = rllen + startrev
1604
1610
1605 def d():
1611 def d():
1606 rl.clearcaches()
1612 rl.clearcaches()
1607
1613
1608 beginrev = startrev
1614 beginrev = startrev
1609 endrev = rllen
1615 endrev = rllen
1610 dist = opts[b'dist']
1616 dist = opts[b'dist']
1611
1617
1612 if reverse:
1618 if reverse:
1613 beginrev, endrev = endrev - 1, beginrev - 1
1619 beginrev, endrev = endrev - 1, beginrev - 1
1614 dist = -1 * dist
1620 dist = -1 * dist
1615
1621
1616 for x in _xrange(beginrev, endrev, dist):
1622 for x in _xrange(beginrev, endrev, dist):
1617 # Old revisions don't support passing int.
1623 # Old revisions don't support passing int.
1618 n = rl.node(x)
1624 n = rl.node(x)
1619 rl.revision(n)
1625 rl.revision(n)
1620
1626
1621 timer, fm = gettimer(ui, opts)
1627 timer, fm = gettimer(ui, opts)
1622 timer(d)
1628 timer(d)
1623 fm.end()
1629 fm.end()
1624
1630
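# Illustrative sketch (not part of upstream perf.py, hypothetical helper): the
# revision stride used by perfrevlogrevisions above, shown on plain integers.
# Reading in reverse simply flips the bounds and negates the step.
def _sketchrevstride(rllen, startrev=0, dist=100, reverse=False):
    beginrev, endrev, step = startrev, rllen, dist
    if reverse:
        beginrev, endrev = endrev - 1, beginrev - 1
        step = -step
    return list(range(beginrev, endrev, step))

# For example, _sketchrevstride(10, dist=3) is [0, 3, 6, 9] and
# _sketchrevstride(10, dist=3, reverse=True) is [9, 6, 3, 0].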
1625 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1631 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1626 [(b's', b'startrev', 1000, b'revision to start writing at'),
1632 [(b's', b'startrev', 1000, b'revision to start writing at'),
1627 (b'', b'stoprev', -1, b'last revision to write'),
1633 (b'', b'stoprev', -1, b'last revision to write'),
1628 (b'', b'count', 3, b'number of passes to perform'),
1634 (b'', b'count', 3, b'number of passes to perform'),
1629 (b'', b'details', False, b'print timing for every revisions tested'),
1635 (b'', b'details', False, b'print timing for every revisions tested'),
1630 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1636 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1631 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1637 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1632 ],
1638 ],
1633 b'-c|-m|FILE')
1639 b'-c|-m|FILE')
1634 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1640 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1635 """Benchmark writing a series of revisions to a revlog.
1641 """Benchmark writing a series of revisions to a revlog.
1636
1642
1637 Possible source values are:
1643 Possible source values are:
1638 * `full`: add from a full text (default).
1644 * `full`: add from a full text (default).
1639 * `parent-1`: add from a delta to the first parent
1645 * `parent-1`: add from a delta to the first parent
1640 * `parent-2`: add from a delta to the second parent if it exists
1646 * `parent-2`: add from a delta to the second parent if it exists
1641 (use a delta from the first parent otherwise)
1647 (use a delta from the first parent otherwise)
1642 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1648 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1643 * `storage`: add from the existing precomputed deltas
1649 * `storage`: add from the existing precomputed deltas
1644 """
1650 """
1645 opts = _byteskwargs(opts)
1651 opts = _byteskwargs(opts)
1646
1652
1647 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1653 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1648 rllen = getlen(ui)(rl)
1654 rllen = getlen(ui)(rl)
1649 if startrev < 0:
1655 if startrev < 0:
1650 startrev = rllen + startrev
1656 startrev = rllen + startrev
1651 if stoprev < 0:
1657 if stoprev < 0:
1652 stoprev = rllen + stoprev
1658 stoprev = rllen + stoprev
1653
1659
1654 lazydeltabase = opts['lazydeltabase']
1660 lazydeltabase = opts['lazydeltabase']
1655 source = opts['source']
1661 source = opts['source']
1656 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1662 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1657 b'storage')
1663 b'storage')
1658 if source not in validsource:
1664 if source not in validsource:
1659 raise error.Abort('invalid source type: %s' % source)
1665 raise error.Abort('invalid source type: %s' % source)
1660
1666
1661 ### actually gather results
1667 ### actually gather results
1662 count = opts['count']
1668 count = opts['count']
1663 if count <= 0:
1669 if count <= 0:
1664 raise error.Abort('invalid run count: %d' % count)
1670 raise error.Abort('invalid run count: %d' % count)
1665 allresults = []
1671 allresults = []
1666 for c in range(count):
1672 for c in range(count):
1667 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1673 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1668 lazydeltabase=lazydeltabase)
1674 lazydeltabase=lazydeltabase)
1669 allresults.append(timing)
1675 allresults.append(timing)
1670
1676
1671 ### consolidate the results in a single list
1677 ### consolidate the results in a single list
1672 results = []
1678 results = []
1673 for idx, (rev, t) in enumerate(allresults[0]):
1679 for idx, (rev, t) in enumerate(allresults[0]):
1674 ts = [t]
1680 ts = [t]
1675 for other in allresults[1:]:
1681 for other in allresults[1:]:
1676 orev, ot = other[idx]
1682 orev, ot = other[idx]
1677 assert orev == rev
1683 assert orev == rev
1678 ts.append(ot)
1684 ts.append(ot)
1679 results.append((rev, ts))
1685 results.append((rev, ts))
1680 resultcount = len(results)
1686 resultcount = len(results)
1681
1687
1682 ### Compute and display relevant statistics
1688 ### Compute and display relevant statistics
1683
1689
1684 # get a formatter
1690 # get a formatter
1685 fm = ui.formatter(b'perf', opts)
1691 fm = ui.formatter(b'perf', opts)
1686 displayall = ui.configbool(b"perf", b"all-timing", False)
1692 displayall = ui.configbool(b"perf", b"all-timing", False)
1687
1693
1688 # print individual details if requested
1694 # print individual details if requested
1689 if opts['details']:
1695 if opts['details']:
1690 for idx, item in enumerate(results, 1):
1696 for idx, item in enumerate(results, 1):
1691 rev, data = item
1697 rev, data = item
1692 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1698 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1693 formatone(fm, data, title=title, displayall=displayall)
1699 formatone(fm, data, title=title, displayall=displayall)
1694
1700
1695 # sorts results by median time
1701 # sorts results by median time
1696 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1702 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1697 # list of (name, index) to display
1703 # list of (name, index) to display
1698 relevants = [
1704 relevants = [
1699 ("min", 0),
1705 ("min", 0),
1700 ("10%", resultcount * 10 // 100),
1706 ("10%", resultcount * 10 // 100),
1701 ("25%", resultcount * 25 // 100),
1707 ("25%", resultcount * 25 // 100),
1702 ("50%", resultcount * 70 // 100),
1708 ("50%", resultcount * 70 // 100),
1703 ("75%", resultcount * 75 // 100),
1709 ("75%", resultcount * 75 // 100),
1704 ("90%", resultcount * 90 // 100),
1710 ("90%", resultcount * 90 // 100),
1705 ("95%", resultcount * 95 // 100),
1711 ("95%", resultcount * 95 // 100),
1706 ("99%", resultcount * 99 // 100),
1712 ("99%", resultcount * 99 // 100),
1707 ("max", -1),
1713 ("max", -1),
1708 ]
1714 ]
1709 if not ui.quiet:
1715 if not ui.quiet:
1710 for name, idx in relevants:
1716 for name, idx in relevants:
1711 data = results[idx]
1717 data = results[idx]
1712 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1718 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1713 formatone(fm, data[1], title=title, displayall=displayall)
1719 formatone(fm, data[1], title=title, displayall=displayall)
1714
1720
1715 # XXX summing that many floats will not be very precise; we ignore this fact
1721 # XXX summing that many floats will not be very precise; we ignore this fact
1716 # for now
1722 # for now
1717 totaltime = []
1723 totaltime = []
1718 for item in allresults:
1724 for item in allresults:
1719 totaltime.append((sum(x[1][0] for x in item),
1725 totaltime.append((sum(x[1][0] for x in item),
1720 sum(x[1][1] for x in item),
1726 sum(x[1][1] for x in item),
1721 sum(x[1][2] for x in item),)
1727 sum(x[1][2] for x in item),)
1722 )
1728 )
1723 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1729 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1724 displayall=displayall)
1730 displayall=displayall)
1725 fm.end()
1731 fm.end()
1726
1732
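# Illustrative sketch (not part of upstream perf.py, hypothetical helper): how
# the percentile table above picks entries out of timing results sorted by
# their median. Once the list is median-sorted, the i-th percentile is simply
# the element at index count * i // 100, and "max" is the last element.
def _sketchpercentiles(results):
    results = sorted(results, key=lambda ts: sorted(ts)[len(ts) // 2])
    count = len(results)
    picks = [("min", 0), ("50%", count * 50 // 100),
             ("90%", count * 90 // 100), ("max", -1)]
    return [(name, results[idx]) for name, idx in picks]

# _sketchpercentiles([[3.0], [1.0], [2.0], [4.0]]) returns
# [('min', [1.0]), ('50%', [3.0]), ('90%', [4.0]), ('max', [4.0])]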
1727 class _faketr(object):
1733 class _faketr(object):
1728 def add(s, x, y, z=None):
1734 def add(s, x, y, z=None):
1729 return None
1735 return None
1730
1736
1731 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1737 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1732 lazydeltabase=True):
1738 lazydeltabase=True):
1733 timings = []
1739 timings = []
1734 tr = _faketr()
1740 tr = _faketr()
1735 with _temprevlog(ui, orig, startrev) as dest:
1741 with _temprevlog(ui, orig, startrev) as dest:
1736 dest._lazydeltabase = lazydeltabase
1742 dest._lazydeltabase = lazydeltabase
1737 revs = list(orig.revs(startrev, stoprev))
1743 revs = list(orig.revs(startrev, stoprev))
1738 total = len(revs)
1744 total = len(revs)
1739 topic = 'adding'
1745 topic = 'adding'
1740 if runidx is not None:
1746 if runidx is not None:
1741 topic += ' (run #%d)' % runidx
1747 topic += ' (run #%d)' % runidx
1742 for idx, rev in enumerate(revs):
1748 for idx, rev in enumerate(revs):
1743 ui.progress(topic, idx, unit='revs', total=total)
1749 ui.progress(topic, idx, unit='revs', total=total)
1744 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1750 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1745 with timeone() as r:
1751 with timeone() as r:
1746 dest.addrawrevision(*addargs, **addkwargs)
1752 dest.addrawrevision(*addargs, **addkwargs)
1747 timings.append((rev, r[0]))
1753 timings.append((rev, r[0]))
1748 ui.progress(topic, total, unit='revs', total=total)
1754 ui.progress(topic, total, unit='revs', total=total)
1749 ui.progress(topic, None, unit='revs', total=total)
1755 ui.progress(topic, None, unit='revs', total=total)
1750 return timings
1756 return timings
1751
1757
1752 def _getrevisionseed(orig, rev, tr, source):
1758 def _getrevisionseed(orig, rev, tr, source):
1753 from mercurial.node import nullid
1759 from mercurial.node import nullid
1754
1760
1755 linkrev = orig.linkrev(rev)
1761 linkrev = orig.linkrev(rev)
1756 node = orig.node(rev)
1762 node = orig.node(rev)
1757 p1, p2 = orig.parents(node)
1763 p1, p2 = orig.parents(node)
1758 flags = orig.flags(rev)
1764 flags = orig.flags(rev)
1759 cachedelta = None
1765 cachedelta = None
1760 text = None
1766 text = None
1761
1767
1762 if source == b'full':
1768 if source == b'full':
1763 text = orig.revision(rev)
1769 text = orig.revision(rev)
1764 elif source == b'parent-1':
1770 elif source == b'parent-1':
1765 baserev = orig.rev(p1)
1771 baserev = orig.rev(p1)
1766 cachedelta = (baserev, orig.revdiff(p1, rev))
1772 cachedelta = (baserev, orig.revdiff(p1, rev))
1767 elif source == b'parent-2':
1773 elif source == b'parent-2':
1768 parent = p2
1774 parent = p2
1769 if p2 == nullid:
1775 if p2 == nullid:
1770 parent = p1
1776 parent = p1
1771 baserev = orig.rev(parent)
1777 baserev = orig.rev(parent)
1772 cachedelta = (baserev, orig.revdiff(parent, rev))
1778 cachedelta = (baserev, orig.revdiff(parent, rev))
1773 elif source == b'parent-smallest':
1779 elif source == b'parent-smallest':
1774 p1diff = orig.revdiff(p1, rev)
1780 p1diff = orig.revdiff(p1, rev)
1775 parent = p1
1781 parent = p1
1776 diff = p1diff
1782 diff = p1diff
1777 if p2 != nullid:
1783 if p2 != nullid:
1778 p2diff = orig.revdiff(p2, rev)
1784 p2diff = orig.revdiff(p2, rev)
1779 if len(p1diff) > len(p2diff):
1785 if len(p1diff) > len(p2diff):
1780 parent = p2
1786 parent = p2
1781 diff = p2diff
1787 diff = p2diff
1782 baserev = orig.rev(parent)
1788 baserev = orig.rev(parent)
1783 cachedelta = (baserev, diff)
1789 cachedelta = (baserev, diff)
1784 elif source == b'storage':
1790 elif source == b'storage':
1785 baserev = orig.deltaparent(rev)
1791 baserev = orig.deltaparent(rev)
1786 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1792 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1787
1793
1788 return ((text, tr, linkrev, p1, p2),
1794 return ((text, tr, linkrev, p1, p2),
1789 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1795 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1790
1796
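# Illustrative sketch (not part of upstream perf.py, hypothetical helper): the
# `parent-smallest` selection above, isolated. Both parent deltas are computed
# with revdiff and the shorter one wins; with only one real parent there is
# nothing to compare.
def _sketchsmallestparentdelta(rl, rev):
    from mercurial.node import nullid
    p1, p2 = rl.parents(rl.node(rev))
    parent, diff = p1, rl.revdiff(p1, rev)
    if p2 != nullid:
        p2diff = rl.revdiff(p2, rev)
        if len(p2diff) < len(diff):
            parent, diff = p2, p2diff
    return rl.rev(parent), diff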
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(vfs,
                             indexfile=indexname,
                             datafile=dataname)
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)

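# A minimal usage sketch for _temprevlog (hypothetical caller; the actual
# benchmark driving it lives elsewhere in this file):
#
#   with _temprevlog(ui, rl, startrev) as dest:
#       # ``dest`` is a throw-away copy of ``rl`` truncated back to
#       # ``startrev``; revisions can then be re-added to it (e.g. via
#       # dest.addrevision(...)) without touching the real repository.
#       ...
#
# The temporary directory is always removed, even if the benchmark raises.
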
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

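# Example invocation (hypothetical; assumes this file is loaded as the "perf"
# extension):
#
#   $ hg perfrevlogchunks -c --engines zlib --startrev 1000
#
# benchmarks raw chunk reads and zlib recompression on the changelog, starting
# at revision 1000.
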
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend([
        (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ])

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()

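# Example invocation (hypothetical revision number):
#
#   $ hg perfrevlogrevision -m 1000
#
# times each phase of reconstructing manifest revision 1000 (delta chain,
# chunk read, decompression, patching, hashing, plus the full call).
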
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of building the volatile
    revision set caches on revset execution. Volatile caches hold the
    filtering- and obsolescence-related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()

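# Example invocation (hypothetical revset expression):
#
#   $ hg perfrevset --clear --contexts 'draft() and not obsolete()'
#
# evaluates the revset repeatedly, dropping the volatile caches and building a
# changectx for every result on each run.
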
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()

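# Example invocation (hypothetical; 'obsolete' is one of the standard volatile
# set names, and repoview filter names may also be passed):
#
#   $ hg perfvolatilesets obsolete --clear-obsstore
#
# times the computation of the 'obsolete' set while re-reading the obsstore
# from disk between runs.
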
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with on-disk cache reads
    and writes disabled.
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # order filters from smaller subsets to bigger subsets
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()

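# Example invocation (hypothetical; 'served' and 'visible' are standard
# repoview filter names):
#
#   $ hg perfbranchmap --full served visible
#
# times a from-scratch branchmap update for the 'served' and 'visible' views,
# with the on-disk branchmap cache neither read nor written.
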
@command(b'perfbranchmapload', [
         (b'f', b'filter', b'', b'Specify repoview filter'),
         (b'', b'list', False, b'List branchmap filter caches'),
         ] + formatteropts)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if filter:
        repo = repoview.repoview(repo, filter)
    else:
        repo = repo.unfiltered()
    # try once without the timer; the filter may not be cached
    if branchmap.read(repo) is None:
        raise error.Abort(b'No branchmap cached for %s repo'
                          % (filter or b'unfiltered'))
    timer, fm = gettimer(ui, opts)
    def bench():
        branchmap.read(repo)
    timer(bench)
    fm.end()

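# Example invocations (hypothetical):
#
#   $ hg perfbranchmapload --list            # show which branchmap caches exist
#   $ hg perfbranchmapload --filter served   # time reading the 'served' cache
#
# The command aborts if no cache file exists for the requested view.
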
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

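# Example invocations (hypothetical; the command is norepo, so it runs
# anywhere):
#
#   $ hg perflrucachedict --size 4 --gets 10000 --mixedgetfreq 70
#
# and, to exercise the cost-aware code paths instead:
#
#   $ hg perflrucachedict --costlimit 500 --mincost 1 --maxcost 100
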
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)
    def write():
        for i in range(100000):
            ui.write((b'Testing write performance\n'))
    timer(write)
    fm.end()

def uisetup(ui):
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, the '--dir' option for
        # openrevlog() should cause an explicit failure, because it has
        # only been available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)