perf: document config options...
marmoute
r42182:dbca2e55 default
@@ -1,2781 +1,2799 @@
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance
3
4 Configurations
5 ==============
6
7 ``perf``
8 --------
9
10 ``all-timing``
11 When set, additional statistics will be reported for each benchmark: best,
12 worst, median, and average. If not set, only the best timing is reported
13 (default: off).
14
15 ``presleep``
16 number of seconds to wait before each group of runs (default: 1)
17
18 ``stub``
19 When set, benchmarks will only be run once; useful for testing (default: off)
20 '''
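As a rough illustration of how the three options documented above interact, here is a minimal, self-contained sketch of a benchmark loop; it is not part of the patch, uses no Mercurial API, and all names and values are made up for the example.

import time

config = {b'presleep': 1, b'stub': False, b'all-timing': False}

def _timeonce(func):
    start = time.perf_counter()
    func()
    return time.perf_counter() - start

def run_benchmark(func, runs=100):
    time.sleep(config[b'presleep'])         # perf.presleep: idle before the run group
    if config[b'stub']:
        return {'best': _timeonce(func)}    # perf.stub: single run, useful for testing
    timings = sorted(_timeonce(func) for _ in range(runs))
    result = {'best': timings[0]}
    if config[b'all-timing']:               # perf.all-timing: also report worst/median/average
        result['max'] = timings[-1]
        result['median'] = timings[len(timings) // 2]
        result['avg'] = sum(timings) / len(timings)
    return result

print(run_benchmark(lambda: sum(range(10 ** 5))))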
3
21
4 # "historical portability" policy of perf.py:
22 # "historical portability" policy of perf.py:
5 #
23 #
6 # We have to do:
24 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
25 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
26 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
27 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
28 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
29 # version as possible
12 #
30 #
13 # We have to do, if possible with reasonable cost:
31 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
32 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
33 # with early Mercurial
16 #
34 #
17 # We don't have to do:
35 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
36 # - make perf command for recent feature work correctly with early
19 # Mercurial
37 # Mercurial
20
38
21 from __future__ import absolute_import
39 from __future__ import absolute_import
22 import contextlib
40 import contextlib
23 import functools
41 import functools
24 import gc
42 import gc
25 import os
43 import os
26 import random
44 import random
27 import shutil
45 import shutil
28 import struct
46 import struct
29 import sys
47 import sys
30 import tempfile
48 import tempfile
31 import threading
49 import threading
32 import time
50 import time
33 from mercurial import (
51 from mercurial import (
34 changegroup,
52 changegroup,
35 cmdutil,
53 cmdutil,
36 commands,
54 commands,
37 copies,
55 copies,
38 error,
56 error,
39 extensions,
57 extensions,
40 hg,
58 hg,
41 mdiff,
59 mdiff,
42 merge,
60 merge,
43 revlog,
61 revlog,
44 util,
62 util,
45 )
63 )
46
64
47 # for "historical portability":
65 # for "historical portability":
48 # try to import modules separately (in dict order), and ignore
66 # try to import modules separately (in dict order), and ignore
49 # failure, because these aren't available with early Mercurial
67 # failure, because these aren't available with early Mercurial
50 try:
68 try:
51 from mercurial import branchmap # since 2.5 (or bcee63733aad)
69 from mercurial import branchmap # since 2.5 (or bcee63733aad)
52 except ImportError:
70 except ImportError:
53 pass
71 pass
54 try:
72 try:
55 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
73 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
56 except ImportError:
74 except ImportError:
57 pass
75 pass
58 try:
76 try:
59 from mercurial import registrar # since 3.7 (or 37d50250b696)
77 from mercurial import registrar # since 3.7 (or 37d50250b696)
60 dir(registrar) # forcibly load it
78 dir(registrar) # forcibly load it
61 except ImportError:
79 except ImportError:
62 registrar = None
80 registrar = None
63 try:
81 try:
64 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
82 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
65 except ImportError:
83 except ImportError:
66 pass
84 pass
67 try:
85 try:
68 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
86 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
69 except ImportError:
87 except ImportError:
70 pass
88 pass
71 try:
89 try:
72 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
90 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
73 except ImportError:
91 except ImportError:
74 pass
92 pass
75
93
76
94
77 def identity(a):
95 def identity(a):
78 return a
96 return a
79
97
80 try:
98 try:
81 from mercurial import pycompat
99 from mercurial import pycompat
82 getargspec = pycompat.getargspec # added to module after 4.5
100 getargspec = pycompat.getargspec # added to module after 4.5
83 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
101 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
84 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
102 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
85 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
103 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
86 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
104 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
87 if pycompat.ispy3:
105 if pycompat.ispy3:
88 _maxint = sys.maxsize # per py3 docs for replacing maxint
106 _maxint = sys.maxsize # per py3 docs for replacing maxint
89 else:
107 else:
90 _maxint = sys.maxint
108 _maxint = sys.maxint
91 except (ImportError, AttributeError):
109 except (ImportError, AttributeError):
92 import inspect
110 import inspect
93 getargspec = inspect.getargspec
111 getargspec = inspect.getargspec
94 _byteskwargs = identity
112 _byteskwargs = identity
95 fsencode = identity # no py3 support
113 fsencode = identity # no py3 support
96 _maxint = sys.maxint # no py3 support
114 _maxint = sys.maxint # no py3 support
97 _sysstr = lambda x: x # no py3 support
115 _sysstr = lambda x: x # no py3 support
98 _xrange = xrange
116 _xrange = xrange
99
117
100 try:
118 try:
101 # 4.7+
119 # 4.7+
102 queue = pycompat.queue.Queue
120 queue = pycompat.queue.Queue
103 except (AttributeError, ImportError):
121 except (AttributeError, ImportError):
104 # <4.7.
122 # <4.7.
105 try:
123 try:
106 queue = pycompat.queue
124 queue = pycompat.queue
107 except (AttributeError, ImportError):
125 except (AttributeError, ImportError):
108 queue = util.queue
126 queue = util.queue
109
127
110 try:
128 try:
111 from mercurial import logcmdutil
129 from mercurial import logcmdutil
112 makelogtemplater = logcmdutil.maketemplater
130 makelogtemplater = logcmdutil.maketemplater
113 except (AttributeError, ImportError):
131 except (AttributeError, ImportError):
114 try:
132 try:
115 makelogtemplater = cmdutil.makelogtemplater
133 makelogtemplater = cmdutil.makelogtemplater
116 except (AttributeError, ImportError):
134 except (AttributeError, ImportError):
117 makelogtemplater = None
135 makelogtemplater = None
118
136
119 # for "historical portability":
137 # for "historical portability":
120 # define util.safehasattr forcibly, because util.safehasattr has been
138 # define util.safehasattr forcibly, because util.safehasattr has been
121 # available since 1.9.3 (or 94b200a11cf7)
139 # available since 1.9.3 (or 94b200a11cf7)
122 _undefined = object()
140 _undefined = object()
123 def safehasattr(thing, attr):
141 def safehasattr(thing, attr):
124 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
142 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
125 setattr(util, 'safehasattr', safehasattr)
143 setattr(util, 'safehasattr', safehasattr)
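A quick usage note on the shim above: unlike the builtin hasattr(), this safehasattr() takes the attribute name as bytes, matching the bytes-first convention of this file. A tiny illustrative check (the class is made up):

class _Dummy(object):
    answer = 42

assert safehasattr(_Dummy, b'answer')        # attribute present
assert not safehasattr(_Dummy, b'missing')   # absent, and no exception is raised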
126
144
127 # for "historical portability":
145 # for "historical portability":
128 # define util.timer forcibly, because util.timer has been available
146 # define util.timer forcibly, because util.timer has been available
129 # since ae5d60bb70c9
147 # since ae5d60bb70c9
130 if safehasattr(time, 'perf_counter'):
148 if safehasattr(time, 'perf_counter'):
131 util.timer = time.perf_counter
149 util.timer = time.perf_counter
132 elif os.name == b'nt':
150 elif os.name == b'nt':
133 util.timer = time.clock
151 util.timer = time.clock
134 else:
152 else:
135 util.timer = time.time
153 util.timer = time.time
136
154
137 # for "historical portability":
155 # for "historical portability":
138 # use locally defined empty option list, if formatteropts isn't
156 # use locally defined empty option list, if formatteropts isn't
139 # available, because commands.formatteropts has been available since
157 # available, because commands.formatteropts has been available since
140 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
158 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
141 # available since 2.2 (or ae5f92e154d3)
159 # available since 2.2 (or ae5f92e154d3)
142 formatteropts = getattr(cmdutil, "formatteropts",
160 formatteropts = getattr(cmdutil, "formatteropts",
143 getattr(commands, "formatteropts", []))
161 getattr(commands, "formatteropts", []))
144
162
145 # for "historical portability":
163 # for "historical portability":
146 # use locally defined option list, if debugrevlogopts isn't available,
164 # use locally defined option list, if debugrevlogopts isn't available,
147 # because commands.debugrevlogopts has been available since 3.7 (or
165 # because commands.debugrevlogopts has been available since 3.7 (or
148 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
166 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
149 # since 1.9 (or a79fea6b3e77).
167 # since 1.9 (or a79fea6b3e77).
150 revlogopts = getattr(cmdutil, "debugrevlogopts",
168 revlogopts = getattr(cmdutil, "debugrevlogopts",
151 getattr(commands, "debugrevlogopts", [
169 getattr(commands, "debugrevlogopts", [
152 (b'c', b'changelog', False, (b'open changelog')),
170 (b'c', b'changelog', False, (b'open changelog')),
153 (b'm', b'manifest', False, (b'open manifest')),
171 (b'm', b'manifest', False, (b'open manifest')),
154 (b'', b'dir', False, (b'open directory manifest')),
172 (b'', b'dir', False, (b'open directory manifest')),
155 ]))
173 ]))
156
174
157 cmdtable = {}
175 cmdtable = {}
158
176
159 # for "historical portability":
177 # for "historical portability":
160 # define parsealiases locally, because cmdutil.parsealiases has been
178 # define parsealiases locally, because cmdutil.parsealiases has been
161 # available since 1.5 (or 6252852b4332)
179 # available since 1.5 (or 6252852b4332)
162 def parsealiases(cmd):
180 def parsealiases(cmd):
163 return cmd.split(b"|")
181 return cmd.split(b"|")
164
182
165 if safehasattr(registrar, 'command'):
183 if safehasattr(registrar, 'command'):
166 command = registrar.command(cmdtable)
184 command = registrar.command(cmdtable)
167 elif safehasattr(cmdutil, 'command'):
185 elif safehasattr(cmdutil, 'command'):
168 command = cmdutil.command(cmdtable)
186 command = cmdutil.command(cmdtable)
169 if b'norepo' not in getargspec(command).args:
187 if b'norepo' not in getargspec(command).args:
170 # for "historical portability":
188 # for "historical portability":
171 # wrap original cmdutil.command, because "norepo" option has
189 # wrap original cmdutil.command, because "norepo" option has
172 # been available since 3.1 (or 75a96326cecb)
190 # been available since 3.1 (or 75a96326cecb)
173 _command = command
191 _command = command
174 def command(name, options=(), synopsis=None, norepo=False):
192 def command(name, options=(), synopsis=None, norepo=False):
175 if norepo:
193 if norepo:
176 commands.norepo += b' %s' % b' '.join(parsealiases(name))
194 commands.norepo += b' %s' % b' '.join(parsealiases(name))
177 return _command(name, list(options), synopsis)
195 return _command(name, list(options), synopsis)
178 else:
196 else:
179 # for "historical portability":
197 # for "historical portability":
180 # define "@command" annotation locally, because cmdutil.command
198 # define "@command" annotation locally, because cmdutil.command
181 # has been available since 1.9 (or 2daa5179e73f)
199 # has been available since 1.9 (or 2daa5179e73f)
182 def command(name, options=(), synopsis=None, norepo=False):
200 def command(name, options=(), synopsis=None, norepo=False):
183 def decorator(func):
201 def decorator(func):
184 if synopsis:
202 if synopsis:
185 cmdtable[name] = func, list(options), synopsis
203 cmdtable[name] = func, list(options), synopsis
186 else:
204 else:
187 cmdtable[name] = func, list(options)
205 cmdtable[name] = func, list(options)
188 if norepo:
206 if norepo:
189 commands.norepo += b' %s' % b' '.join(parsealiases(name))
207 commands.norepo += b' %s' % b' '.join(parsealiases(name))
190 return func
208 return func
191 return decorator
209 return decorator
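For clarity, a sketch of what either variant of @command produces: decorating a function registers it in cmdtable under its primary name. The command name and body below are hypothetical, not from the patch.

@command(b'perfnoop', [], b'hg perfnoop')
def perfnoop(ui, repo, **opts):
    """do nothing, as fast as possible"""
    pass

# cmdtable now maps b'perfnoop' to roughly (perfnoop, options, synopsis),
# with the exact tuple shape depending on which code path defined @command above.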
192
210
193 try:
211 try:
194 import mercurial.registrar
212 import mercurial.registrar
195 import mercurial.configitems
213 import mercurial.configitems
196 configtable = {}
214 configtable = {}
197 configitem = mercurial.registrar.configitem(configtable)
215 configitem = mercurial.registrar.configitem(configtable)
198 configitem(b'perf', b'presleep',
216 configitem(b'perf', b'presleep',
199 default=mercurial.configitems.dynamicdefault,
217 default=mercurial.configitems.dynamicdefault,
200 )
218 )
201 configitem(b'perf', b'stub',
219 configitem(b'perf', b'stub',
202 default=mercurial.configitems.dynamicdefault,
220 default=mercurial.configitems.dynamicdefault,
203 )
221 )
204 configitem(b'perf', b'parentscount',
222 configitem(b'perf', b'parentscount',
205 default=mercurial.configitems.dynamicdefault,
223 default=mercurial.configitems.dynamicdefault,
206 )
224 )
207 configitem(b'perf', b'all-timing',
225 configitem(b'perf', b'all-timing',
208 default=mercurial.configitems.dynamicdefault,
226 default=mercurial.configitems.dynamicdefault,
209 )
227 )
210 except (ImportError, AttributeError):
228 except (ImportError, AttributeError):
211 pass
229 pass
212
230
213 def getlen(ui):
231 def getlen(ui):
214 if ui.configbool(b"perf", b"stub", False):
232 if ui.configbool(b"perf", b"stub", False):
215 return lambda x: 1
233 return lambda x: 1
216 return len
234 return len
217
235
218 def gettimer(ui, opts=None):
236 def gettimer(ui, opts=None):
219 """return a timer function and formatter: (timer, formatter)
237 """return a timer function and formatter: (timer, formatter)
220
238
221 This function exists to gather the creation of formatter in a single
239 This function exists to gather the creation of formatter in a single
222 place instead of duplicating it in all performance commands."""
240 place instead of duplicating it in all performance commands."""
223
241
224 # enforce an idle period before execution to counteract power management
242 # enforce an idle period before execution to counteract power management
225 # experimental config: perf.presleep
243 # experimental config: perf.presleep
226 time.sleep(getint(ui, b"perf", b"presleep", 1))
244 time.sleep(getint(ui, b"perf", b"presleep", 1))
227
245
228 if opts is None:
246 if opts is None:
229 opts = {}
247 opts = {}
230 # redirect all to stderr unless buffer api is in use
248 # redirect all to stderr unless buffer api is in use
231 if not ui._buffers:
249 if not ui._buffers:
232 ui = ui.copy()
250 ui = ui.copy()
233 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
251 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
234 if uifout:
252 if uifout:
235 # for "historical portability":
253 # for "historical portability":
236 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
254 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
237 uifout.set(ui.ferr)
255 uifout.set(ui.ferr)
238
256
239 # get a formatter
257 # get a formatter
240 uiformatter = getattr(ui, 'formatter', None)
258 uiformatter = getattr(ui, 'formatter', None)
241 if uiformatter:
259 if uiformatter:
242 fm = uiformatter(b'perf', opts)
260 fm = uiformatter(b'perf', opts)
243 else:
261 else:
244 # for "historical portability":
262 # for "historical portability":
245 # define formatter locally, because ui.formatter has been
263 # define formatter locally, because ui.formatter has been
246 # available since 2.2 (or ae5f92e154d3)
264 # available since 2.2 (or ae5f92e154d3)
247 from mercurial import node
265 from mercurial import node
248 class defaultformatter(object):
266 class defaultformatter(object):
249 """Minimized composition of baseformatter and plainformatter
267 """Minimized composition of baseformatter and plainformatter
250 """
268 """
251 def __init__(self, ui, topic, opts):
269 def __init__(self, ui, topic, opts):
252 self._ui = ui
270 self._ui = ui
253 if ui.debugflag:
271 if ui.debugflag:
254 self.hexfunc = node.hex
272 self.hexfunc = node.hex
255 else:
273 else:
256 self.hexfunc = node.short
274 self.hexfunc = node.short
257 def __nonzero__(self):
275 def __nonzero__(self):
258 return False
276 return False
259 __bool__ = __nonzero__
277 __bool__ = __nonzero__
260 def startitem(self):
278 def startitem(self):
261 pass
279 pass
262 def data(self, **data):
280 def data(self, **data):
263 pass
281 pass
264 def write(self, fields, deftext, *fielddata, **opts):
282 def write(self, fields, deftext, *fielddata, **opts):
265 self._ui.write(deftext % fielddata, **opts)
283 self._ui.write(deftext % fielddata, **opts)
266 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
284 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
267 if cond:
285 if cond:
268 self._ui.write(deftext % fielddata, **opts)
286 self._ui.write(deftext % fielddata, **opts)
269 def plain(self, text, **opts):
287 def plain(self, text, **opts):
270 self._ui.write(text, **opts)
288 self._ui.write(text, **opts)
271 def end(self):
289 def end(self):
272 pass
290 pass
273 fm = defaultformatter(ui, b'perf', opts)
291 fm = defaultformatter(ui, b'perf', opts)
274
292
275 # stub function, runs code only once instead of in a loop
293 # stub function, runs code only once instead of in a loop
276 # experimental config: perf.stub
294 # experimental config: perf.stub
277 if ui.configbool(b"perf", b"stub", False):
295 if ui.configbool(b"perf", b"stub", False):
278 return functools.partial(stub_timer, fm), fm
296 return functools.partial(stub_timer, fm), fm
279
297
280 # experimental config: perf.all-timing
298 # experimental config: perf.all-timing
281 displayall = ui.configbool(b"perf", b"all-timing", False)
299 displayall = ui.configbool(b"perf", b"all-timing", False)
282 return functools.partial(_timer, fm, displayall=displayall), fm
300 return functools.partial(_timer, fm, displayall=displayall), fm
283
301
284 def stub_timer(fm, func, setup=None, title=None):
302 def stub_timer(fm, func, setup=None, title=None):
285 if setup is not None:
303 if setup is not None:
286 setup()
304 setup()
287 func()
305 func()
288
306
289 @contextlib.contextmanager
307 @contextlib.contextmanager
290 def timeone():
308 def timeone():
291 r = []
309 r = []
292 ostart = os.times()
310 ostart = os.times()
293 cstart = util.timer()
311 cstart = util.timer()
294 yield r
312 yield r
295 cstop = util.timer()
313 cstop = util.timer()
296 ostop = os.times()
314 ostop = os.times()
297 a, b = ostart, ostop
315 a, b = ostart, ostop
298 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
316 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
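A short usage sketch for timeone(): the context manager yields a list, and when the with-block exits it appends a single (wall, user, sys) tuple measured around the block. The code being measured here is arbitrary.

with timeone() as measurement:
    sum(range(10 ** 6))                  # the code being measured
wall, user, sys_time = measurement[0]    # elapsed seconds as floats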
299
317
300 def _timer(fm, func, setup=None, title=None, displayall=False):
318 def _timer(fm, func, setup=None, title=None, displayall=False):
301 gc.collect()
319 gc.collect()
302 results = []
320 results = []
303 begin = util.timer()
321 begin = util.timer()
304 count = 0
322 count = 0
305 while True:
323 while True:
306 if setup is not None:
324 if setup is not None:
307 setup()
325 setup()
308 with timeone() as item:
326 with timeone() as item:
309 r = func()
327 r = func()
310 count += 1
328 count += 1
311 results.append(item[0])
329 results.append(item[0])
312 cstop = util.timer()
330 cstop = util.timer()
313 if cstop - begin > 3 and count >= 100:
331 if cstop - begin > 3 and count >= 100:
314 break
332 break
315 if cstop - begin > 10 and count >= 3:
333 if cstop - begin > 10 and count >= 3:
316 break
334 break
317
335
318 formatone(fm, results, title=title, result=r,
336 formatone(fm, results, title=title, result=r,
319 displayall=displayall)
337 displayall=displayall)
320
338
321 def formatone(fm, timings, title=None, result=None, displayall=False):
339 def formatone(fm, timings, title=None, result=None, displayall=False):
322
340
323 count = len(timings)
341 count = len(timings)
324
342
325 fm.startitem()
343 fm.startitem()
326
344
327 if title:
345 if title:
328 fm.write(b'title', b'! %s\n', title)
346 fm.write(b'title', b'! %s\n', title)
329 if result:
347 if result:
330 fm.write(b'result', b'! result: %s\n', result)
348 fm.write(b'result', b'! result: %s\n', result)
331 def display(role, entry):
349 def display(role, entry):
332 prefix = b''
350 prefix = b''
333 if role != b'best':
351 if role != b'best':
334 prefix = b'%s.' % role
352 prefix = b'%s.' % role
335 fm.plain(b'!')
353 fm.plain(b'!')
336 fm.write(prefix + b'wall', b' wall %f', entry[0])
354 fm.write(prefix + b'wall', b' wall %f', entry[0])
337 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
355 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
338 fm.write(prefix + b'user', b' user %f', entry[1])
356 fm.write(prefix + b'user', b' user %f', entry[1])
339 fm.write(prefix + b'sys', b' sys %f', entry[2])
357 fm.write(prefix + b'sys', b' sys %f', entry[2])
340 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
358 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
341 fm.plain(b'\n')
359 fm.plain(b'\n')
342 timings.sort()
360 timings.sort()
343 min_val = timings[0]
361 min_val = timings[0]
344 display(b'best', min_val)
362 display(b'best', min_val)
345 if displayall:
363 if displayall:
346 max_val = timings[-1]
364 max_val = timings[-1]
347 display(b'max', max_val)
365 display(b'max', max_val)
348 avg = tuple([sum(x) / count for x in zip(*timings)])
366 avg = tuple([sum(x) / count for x in zip(*timings)])
349 display(b'avg', avg)
367 display(b'avg', avg)
350 median = timings[len(timings) // 2]
368 median = timings[len(timings) // 2]
351 display(b'median', median)
369 display(b'median', median)
352
370
353 # utilities for historical portability
371 # utilities for historical portability
354
372
355 def getint(ui, section, name, default):
373 def getint(ui, section, name, default):
356 # for "historical portability":
374 # for "historical portability":
357 # ui.configint has been available since 1.9 (or fa2b596db182)
375 # ui.configint has been available since 1.9 (or fa2b596db182)
358 v = ui.config(section, name, None)
376 v = ui.config(section, name, None)
359 if v is None:
377 if v is None:
360 return default
378 return default
361 try:
379 try:
362 return int(v)
380 return int(v)
363 except ValueError:
381 except ValueError:
364 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
382 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
365 % (section, name, v))
383 % (section, name, v))
366
384
367 def safeattrsetter(obj, name, ignoremissing=False):
385 def safeattrsetter(obj, name, ignoremissing=False):
368 """Ensure that 'obj' has 'name' attribute before subsequent setattr
386 """Ensure that 'obj' has 'name' attribute before subsequent setattr
369
387
370 This function is aborted, if 'obj' doesn't have 'name' attribute
388 This function is aborted, if 'obj' doesn't have 'name' attribute
371 at runtime. This avoids overlooking removal of an attribute, which
389 at runtime. This avoids overlooking removal of an attribute, which
372 breaks assumption of performance measurement, in the future.
390 breaks assumption of performance measurement, in the future.
373
391
374 This function returns the object to (1) assign a new value, and
392 This function returns the object to (1) assign a new value, and
375 (2) restore an original value to the attribute.
393 (2) restore an original value to the attribute.
376
394
377 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
395 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
378 abortion, and this function returns None. This is useful to
396 abortion, and this function returns None. This is useful to
379 examine an attribute, which isn't ensured in all Mercurial
397 examine an attribute, which isn't ensured in all Mercurial
380 versions.
398 versions.
381 """
399 """
382 if not util.safehasattr(obj, name):
400 if not util.safehasattr(obj, name):
383 if ignoremissing:
401 if ignoremissing:
384 return None
402 return None
385 raise error.Abort((b"missing attribute %s of %s might break assumption"
403 raise error.Abort((b"missing attribute %s of %s might break assumption"
386 b" of performance measurement") % (name, obj))
404 b" of performance measurement") % (name, obj))
387
405
388 origvalue = getattr(obj, _sysstr(name))
406 origvalue = getattr(obj, _sysstr(name))
389 class attrutil(object):
407 class attrutil(object):
390 def set(self, newvalue):
408 def set(self, newvalue):
391 setattr(obj, _sysstr(name), newvalue)
409 setattr(obj, _sysstr(name), newvalue)
392 def restore(self):
410 def restore(self):
393 setattr(obj, _sysstr(name), origvalue)
411 setattr(obj, _sysstr(name), origvalue)
394
412
395 return attrutil()
413 return attrutil()
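Usage sketch for safeattrsetter(): the returned object lets a benchmark temporarily override an attribute and put the original value back afterwards. The target class and attribute here are made up for illustration.

class _Target(object):
    out = b'original'

setter = safeattrsetter(_Target, b'out')
setter.set(b'redirected')        # override for the duration of a benchmark
assert _Target.out == b'redirected'
setter.restore()                 # put the original value back
assert _Target.out == b'original'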
396
414
397 # utilities to examine each internal API changes
415 # utilities to examine each internal API changes
398
416
399 def getbranchmapsubsettable():
417 def getbranchmapsubsettable():
400 # for "historical portability":
418 # for "historical portability":
401 # subsettable is defined in:
419 # subsettable is defined in:
402 # - branchmap since 2.9 (or 175c6fd8cacc)
420 # - branchmap since 2.9 (or 175c6fd8cacc)
403 # - repoview since 2.5 (or 59a9f18d4587)
421 # - repoview since 2.5 (or 59a9f18d4587)
404 for mod in (branchmap, repoview):
422 for mod in (branchmap, repoview):
405 subsettable = getattr(mod, 'subsettable', None)
423 subsettable = getattr(mod, 'subsettable', None)
406 if subsettable:
424 if subsettable:
407 return subsettable
425 return subsettable
408
426
409 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
427 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
410 # branchmap and repoview modules exist, but subsettable attribute
428 # branchmap and repoview modules exist, but subsettable attribute
411 # doesn't)
429 # doesn't)
412 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
430 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
413 hint=b"use 2.5 or later")
431 hint=b"use 2.5 or later")
414
432
415 def getsvfs(repo):
433 def getsvfs(repo):
416 """Return appropriate object to access files under .hg/store
434 """Return appropriate object to access files under .hg/store
417 """
435 """
418 # for "historical portability":
436 # for "historical portability":
419 # repo.svfs has been available since 2.3 (or 7034365089bf)
437 # repo.svfs has been available since 2.3 (or 7034365089bf)
420 svfs = getattr(repo, 'svfs', None)
438 svfs = getattr(repo, 'svfs', None)
421 if svfs:
439 if svfs:
422 return svfs
440 return svfs
423 else:
441 else:
424 return getattr(repo, 'sopener')
442 return getattr(repo, 'sopener')
425
443
426 def getvfs(repo):
444 def getvfs(repo):
427 """Return appropriate object to access files under .hg
445 """Return appropriate object to access files under .hg
428 """
446 """
429 # for "historical portability":
447 # for "historical portability":
430 # repo.vfs has been available since 2.3 (or 7034365089bf)
448 # repo.vfs has been available since 2.3 (or 7034365089bf)
431 vfs = getattr(repo, 'vfs', None)
449 vfs = getattr(repo, 'vfs', None)
432 if vfs:
450 if vfs:
433 return vfs
451 return vfs
434 else:
452 else:
435 return getattr(repo, 'opener')
453 return getattr(repo, 'opener')
436
454
437 def repocleartagscachefunc(repo):
455 def repocleartagscachefunc(repo):
438 """Return the function to clear tags cache according to repo internal API
456 """Return the function to clear tags cache according to repo internal API
439 """
457 """
440 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
458 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
441 # in this case, setattr(repo, '_tagscache', None) or so isn't
459 # in this case, setattr(repo, '_tagscache', None) or so isn't
442 # correct way to clear tags cache, because existing code paths
460 # correct way to clear tags cache, because existing code paths
443 # expect _tagscache to be a structured object.
461 # expect _tagscache to be a structured object.
444 def clearcache():
462 def clearcache():
445 # _tagscache has been filteredpropertycache since 2.5 (or
463 # _tagscache has been filteredpropertycache since 2.5 (or
446 # 98c867ac1330), and delattr() can't work in such case
464 # 98c867ac1330), and delattr() can't work in such case
447 if b'_tagscache' in vars(repo):
465 if b'_tagscache' in vars(repo):
448 del repo.__dict__[b'_tagscache']
466 del repo.__dict__[b'_tagscache']
449 return clearcache
467 return clearcache
450
468
451 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
469 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
452 if repotags: # since 1.4 (or 5614a628d173)
470 if repotags: # since 1.4 (or 5614a628d173)
453 return lambda : repotags.set(None)
471 return lambda : repotags.set(None)
454
472
455 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
473 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
456 if repotagscache: # since 0.6 (or d7df759d0e97)
474 if repotagscache: # since 0.6 (or d7df759d0e97)
457 return lambda : repotagscache.set(None)
475 return lambda : repotagscache.set(None)
458
476
459 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
477 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
460 # this point, but it isn't so problematic, because:
478 # this point, but it isn't so problematic, because:
461 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
479 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
462 # in perftags() causes failure soon
480 # in perftags() causes failure soon
463 # - perf.py itself has been available since 1.1 (or eb240755386d)
481 # - perf.py itself has been available since 1.1 (or eb240755386d)
464 raise error.Abort((b"tags API of this hg command is unknown"))
482 raise error.Abort((b"tags API of this hg command is unknown"))
465
483
466 # utilities to clear cache
484 # utilities to clear cache
467
485
468 def clearfilecache(obj, attrname):
486 def clearfilecache(obj, attrname):
469 unfiltered = getattr(obj, 'unfiltered', None)
487 unfiltered = getattr(obj, 'unfiltered', None)
470 if unfiltered is not None:
488 if unfiltered is not None:
471 obj = obj.unfiltered()
489 obj = obj.unfiltered()
472 if attrname in vars(obj):
490 if attrname in vars(obj):
473 delattr(obj, attrname)
491 delattr(obj, attrname)
474 obj._filecache.pop(attrname, None)
492 obj._filecache.pop(attrname, None)
475
493
476 def clearchangelog(repo):
494 def clearchangelog(repo):
477 if repo is not repo.unfiltered():
495 if repo is not repo.unfiltered():
478 object.__setattr__(repo, r'_clcachekey', None)
496 object.__setattr__(repo, r'_clcachekey', None)
479 object.__setattr__(repo, r'_clcache', None)
497 object.__setattr__(repo, r'_clcache', None)
480 clearfilecache(repo.unfiltered(), 'changelog')
498 clearfilecache(repo.unfiltered(), 'changelog')
481
499
482 # perf commands
500 # perf commands
483
501
484 @command(b'perfwalk', formatteropts)
502 @command(b'perfwalk', formatteropts)
485 def perfwalk(ui, repo, *pats, **opts):
503 def perfwalk(ui, repo, *pats, **opts):
486 opts = _byteskwargs(opts)
504 opts = _byteskwargs(opts)
487 timer, fm = gettimer(ui, opts)
505 timer, fm = gettimer(ui, opts)
488 m = scmutil.match(repo[None], pats, {})
506 m = scmutil.match(repo[None], pats, {})
489 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
507 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
490 ignored=False))))
508 ignored=False))))
491 fm.end()
509 fm.end()
492
510
493 @command(b'perfannotate', formatteropts)
511 @command(b'perfannotate', formatteropts)
494 def perfannotate(ui, repo, f, **opts):
512 def perfannotate(ui, repo, f, **opts):
495 opts = _byteskwargs(opts)
513 opts = _byteskwargs(opts)
496 timer, fm = gettimer(ui, opts)
514 timer, fm = gettimer(ui, opts)
497 fc = repo[b'.'][f]
515 fc = repo[b'.'][f]
498 timer(lambda: len(fc.annotate(True)))
516 timer(lambda: len(fc.annotate(True)))
499 fm.end()
517 fm.end()
500
518
501 @command(b'perfstatus',
519 @command(b'perfstatus',
502 [(b'u', b'unknown', False,
520 [(b'u', b'unknown', False,
503 b'ask status to look for unknown files')] + formatteropts)
521 b'ask status to look for unknown files')] + formatteropts)
504 def perfstatus(ui, repo, **opts):
522 def perfstatus(ui, repo, **opts):
505 opts = _byteskwargs(opts)
523 opts = _byteskwargs(opts)
506 #m = match.always(repo.root, repo.getcwd())
524 #m = match.always(repo.root, repo.getcwd())
507 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
525 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
508 # False))))
526 # False))))
509 timer, fm = gettimer(ui, opts)
527 timer, fm = gettimer(ui, opts)
510 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
528 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
511 fm.end()
529 fm.end()
512
530
513 @command(b'perfaddremove', formatteropts)
531 @command(b'perfaddremove', formatteropts)
514 def perfaddremove(ui, repo, **opts):
532 def perfaddremove(ui, repo, **opts):
515 opts = _byteskwargs(opts)
533 opts = _byteskwargs(opts)
516 timer, fm = gettimer(ui, opts)
534 timer, fm = gettimer(ui, opts)
517 try:
535 try:
518 oldquiet = repo.ui.quiet
536 oldquiet = repo.ui.quiet
519 repo.ui.quiet = True
537 repo.ui.quiet = True
520 matcher = scmutil.match(repo[None])
538 matcher = scmutil.match(repo[None])
521 opts[b'dry_run'] = True
539 opts[b'dry_run'] = True
522 if b'uipathfn' in getargspec(scmutil.addremove).args:
540 if b'uipathfn' in getargspec(scmutil.addremove).args:
523 uipathfn = scmutil.getuipathfn(repo)
541 uipathfn = scmutil.getuipathfn(repo)
524 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
542 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
525 else:
543 else:
526 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
544 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
527 finally:
545 finally:
528 repo.ui.quiet = oldquiet
546 repo.ui.quiet = oldquiet
529 fm.end()
547 fm.end()
530
548
531 def clearcaches(cl):
549 def clearcaches(cl):
532 # behave somewhat consistently across internal API changes
550 # behave somewhat consistently across internal API changes
533 if util.safehasattr(cl, b'clearcaches'):
551 if util.safehasattr(cl, b'clearcaches'):
534 cl.clearcaches()
552 cl.clearcaches()
535 elif util.safehasattr(cl, b'_nodecache'):
553 elif util.safehasattr(cl, b'_nodecache'):
536 from mercurial.node import nullid, nullrev
554 from mercurial.node import nullid, nullrev
537 cl._nodecache = {nullid: nullrev}
555 cl._nodecache = {nullid: nullrev}
538 cl._nodepos = None
556 cl._nodepos = None
539
557
540 @command(b'perfheads', formatteropts)
558 @command(b'perfheads', formatteropts)
541 def perfheads(ui, repo, **opts):
559 def perfheads(ui, repo, **opts):
542 """benchmark the computation of a changelog heads"""
560 """benchmark the computation of a changelog heads"""
543 opts = _byteskwargs(opts)
561 opts = _byteskwargs(opts)
544 timer, fm = gettimer(ui, opts)
562 timer, fm = gettimer(ui, opts)
545 cl = repo.changelog
563 cl = repo.changelog
546 def s():
564 def s():
547 clearcaches(cl)
565 clearcaches(cl)
548 def d():
566 def d():
549 len(cl.headrevs())
567 len(cl.headrevs())
550 timer(d, setup=s)
568 timer(d, setup=s)
551 fm.end()
569 fm.end()
552
570
553 @command(b'perftags', formatteropts+
571 @command(b'perftags', formatteropts+
554 [
572 [
555 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
573 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
556 ])
574 ])
557 def perftags(ui, repo, **opts):
575 def perftags(ui, repo, **opts):
558 opts = _byteskwargs(opts)
576 opts = _byteskwargs(opts)
559 timer, fm = gettimer(ui, opts)
577 timer, fm = gettimer(ui, opts)
560 repocleartagscache = repocleartagscachefunc(repo)
578 repocleartagscache = repocleartagscachefunc(repo)
561 clearrevlogs = opts[b'clear_revlogs']
579 clearrevlogs = opts[b'clear_revlogs']
562 def s():
580 def s():
563 if clearrevlogs:
581 if clearrevlogs:
564 clearchangelog(repo)
582 clearchangelog(repo)
565 clearfilecache(repo.unfiltered(), 'manifest')
583 clearfilecache(repo.unfiltered(), 'manifest')
566 repocleartagscache()
584 repocleartagscache()
567 def t():
585 def t():
568 return len(repo.tags())
586 return len(repo.tags())
569 timer(t, setup=s)
587 timer(t, setup=s)
570 fm.end()
588 fm.end()
571
589
572 @command(b'perfancestors', formatteropts)
590 @command(b'perfancestors', formatteropts)
573 def perfancestors(ui, repo, **opts):
591 def perfancestors(ui, repo, **opts):
574 opts = _byteskwargs(opts)
592 opts = _byteskwargs(opts)
575 timer, fm = gettimer(ui, opts)
593 timer, fm = gettimer(ui, opts)
576 heads = repo.changelog.headrevs()
594 heads = repo.changelog.headrevs()
577 def d():
595 def d():
578 for a in repo.changelog.ancestors(heads):
596 for a in repo.changelog.ancestors(heads):
579 pass
597 pass
580 timer(d)
598 timer(d)
581 fm.end()
599 fm.end()
582
600
583 @command(b'perfancestorset', formatteropts)
601 @command(b'perfancestorset', formatteropts)
584 def perfancestorset(ui, repo, revset, **opts):
602 def perfancestorset(ui, repo, revset, **opts):
585 opts = _byteskwargs(opts)
603 opts = _byteskwargs(opts)
586 timer, fm = gettimer(ui, opts)
604 timer, fm = gettimer(ui, opts)
587 revs = repo.revs(revset)
605 revs = repo.revs(revset)
588 heads = repo.changelog.headrevs()
606 heads = repo.changelog.headrevs()
589 def d():
607 def d():
590 s = repo.changelog.ancestors(heads)
608 s = repo.changelog.ancestors(heads)
591 for rev in revs:
609 for rev in revs:
592 rev in s
610 rev in s
593 timer(d)
611 timer(d)
594 fm.end()
612 fm.end()
595
613
596 @command(b'perfdiscovery', formatteropts, b'PATH')
614 @command(b'perfdiscovery', formatteropts, b'PATH')
597 def perfdiscovery(ui, repo, path, **opts):
615 def perfdiscovery(ui, repo, path, **opts):
598 """benchmark discovery between local repo and the peer at given path
616 """benchmark discovery between local repo and the peer at given path
599 """
617 """
600 repos = [repo, None]
618 repos = [repo, None]
601 timer, fm = gettimer(ui, opts)
619 timer, fm = gettimer(ui, opts)
602 path = ui.expandpath(path)
620 path = ui.expandpath(path)
603
621
604 def s():
622 def s():
605 repos[1] = hg.peer(ui, opts, path)
623 repos[1] = hg.peer(ui, opts, path)
606 def d():
624 def d():
607 setdiscovery.findcommonheads(ui, *repos)
625 setdiscovery.findcommonheads(ui, *repos)
608 timer(d, setup=s)
626 timer(d, setup=s)
609 fm.end()
627 fm.end()
610
628
611 @command(b'perfbookmarks', formatteropts +
629 @command(b'perfbookmarks', formatteropts +
612 [
630 [
613 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
631 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
614 ])
632 ])
615 def perfbookmarks(ui, repo, **opts):
633 def perfbookmarks(ui, repo, **opts):
616 """benchmark parsing bookmarks from disk to memory"""
634 """benchmark parsing bookmarks from disk to memory"""
617 opts = _byteskwargs(opts)
635 opts = _byteskwargs(opts)
618 timer, fm = gettimer(ui, opts)
636 timer, fm = gettimer(ui, opts)
619
637
620 clearrevlogs = opts[b'clear_revlogs']
638 clearrevlogs = opts[b'clear_revlogs']
621 def s():
639 def s():
622 if clearrevlogs:
640 if clearrevlogs:
623 clearchangelog(repo)
641 clearchangelog(repo)
624 clearfilecache(repo, b'_bookmarks')
642 clearfilecache(repo, b'_bookmarks')
625 def d():
643 def d():
626 repo._bookmarks
644 repo._bookmarks
627 timer(d, setup=s)
645 timer(d, setup=s)
628 fm.end()
646 fm.end()
629
647
630 @command(b'perfbundleread', formatteropts, b'BUNDLE')
648 @command(b'perfbundleread', formatteropts, b'BUNDLE')
631 def perfbundleread(ui, repo, bundlepath, **opts):
649 def perfbundleread(ui, repo, bundlepath, **opts):
632 """Benchmark reading of bundle files.
650 """Benchmark reading of bundle files.
633
651
634 This command is meant to isolate the I/O part of bundle reading as
652 This command is meant to isolate the I/O part of bundle reading as
635 much as possible.
653 much as possible.
636 """
654 """
637 from mercurial import (
655 from mercurial import (
638 bundle2,
656 bundle2,
639 exchange,
657 exchange,
640 streamclone,
658 streamclone,
641 )
659 )
642
660
643 opts = _byteskwargs(opts)
661 opts = _byteskwargs(opts)
644
662
645 def makebench(fn):
663 def makebench(fn):
646 def run():
664 def run():
647 with open(bundlepath, b'rb') as fh:
665 with open(bundlepath, b'rb') as fh:
648 bundle = exchange.readbundle(ui, fh, bundlepath)
666 bundle = exchange.readbundle(ui, fh, bundlepath)
649 fn(bundle)
667 fn(bundle)
650
668
651 return run
669 return run
652
670
653 def makereadnbytes(size):
671 def makereadnbytes(size):
654 def run():
672 def run():
655 with open(bundlepath, b'rb') as fh:
673 with open(bundlepath, b'rb') as fh:
656 bundle = exchange.readbundle(ui, fh, bundlepath)
674 bundle = exchange.readbundle(ui, fh, bundlepath)
657 while bundle.read(size):
675 while bundle.read(size):
658 pass
676 pass
659
677
660 return run
678 return run
661
679
662 def makestdioread(size):
680 def makestdioread(size):
663 def run():
681 def run():
664 with open(bundlepath, b'rb') as fh:
682 with open(bundlepath, b'rb') as fh:
665 while fh.read(size):
683 while fh.read(size):
666 pass
684 pass
667
685
668 return run
686 return run
669
687
670 # bundle1
688 # bundle1
671
689
672 def deltaiter(bundle):
690 def deltaiter(bundle):
673 for delta in bundle.deltaiter():
691 for delta in bundle.deltaiter():
674 pass
692 pass
675
693
676 def iterchunks(bundle):
694 def iterchunks(bundle):
677 for chunk in bundle.getchunks():
695 for chunk in bundle.getchunks():
678 pass
696 pass
679
697
680 # bundle2
698 # bundle2
681
699
682 def forwardchunks(bundle):
700 def forwardchunks(bundle):
683 for chunk in bundle._forwardchunks():
701 for chunk in bundle._forwardchunks():
684 pass
702 pass
685
703
686 def iterparts(bundle):
704 def iterparts(bundle):
687 for part in bundle.iterparts():
705 for part in bundle.iterparts():
688 pass
706 pass
689
707
690 def iterpartsseekable(bundle):
708 def iterpartsseekable(bundle):
691 for part in bundle.iterparts(seekable=True):
709 for part in bundle.iterparts(seekable=True):
692 pass
710 pass
693
711
694 def seek(bundle):
712 def seek(bundle):
695 for part in bundle.iterparts(seekable=True):
713 for part in bundle.iterparts(seekable=True):
696 part.seek(0, os.SEEK_END)
714 part.seek(0, os.SEEK_END)
697
715
698 def makepartreadnbytes(size):
716 def makepartreadnbytes(size):
699 def run():
717 def run():
700 with open(bundlepath, b'rb') as fh:
718 with open(bundlepath, b'rb') as fh:
701 bundle = exchange.readbundle(ui, fh, bundlepath)
719 bundle = exchange.readbundle(ui, fh, bundlepath)
702 for part in bundle.iterparts():
720 for part in bundle.iterparts():
703 while part.read(size):
721 while part.read(size):
704 pass
722 pass
705
723
706 return run
724 return run
707
725
708 benches = [
726 benches = [
709 (makestdioread(8192), b'read(8k)'),
727 (makestdioread(8192), b'read(8k)'),
710 (makestdioread(16384), b'read(16k)'),
728 (makestdioread(16384), b'read(16k)'),
711 (makestdioread(32768), b'read(32k)'),
729 (makestdioread(32768), b'read(32k)'),
712 (makestdioread(131072), b'read(128k)'),
730 (makestdioread(131072), b'read(128k)'),
713 ]
731 ]
714
732
715 with open(bundlepath, b'rb') as fh:
733 with open(bundlepath, b'rb') as fh:
716 bundle = exchange.readbundle(ui, fh, bundlepath)
734 bundle = exchange.readbundle(ui, fh, bundlepath)
717
735
718 if isinstance(bundle, changegroup.cg1unpacker):
736 if isinstance(bundle, changegroup.cg1unpacker):
719 benches.extend([
737 benches.extend([
720 (makebench(deltaiter), b'cg1 deltaiter()'),
738 (makebench(deltaiter), b'cg1 deltaiter()'),
721 (makebench(iterchunks), b'cg1 getchunks()'),
739 (makebench(iterchunks), b'cg1 getchunks()'),
722 (makereadnbytes(8192), b'cg1 read(8k)'),
740 (makereadnbytes(8192), b'cg1 read(8k)'),
723 (makereadnbytes(16384), b'cg1 read(16k)'),
741 (makereadnbytes(16384), b'cg1 read(16k)'),
724 (makereadnbytes(32768), b'cg1 read(32k)'),
742 (makereadnbytes(32768), b'cg1 read(32k)'),
725 (makereadnbytes(131072), b'cg1 read(128k)'),
743 (makereadnbytes(131072), b'cg1 read(128k)'),
726 ])
744 ])
727 elif isinstance(bundle, bundle2.unbundle20):
745 elif isinstance(bundle, bundle2.unbundle20):
728 benches.extend([
746 benches.extend([
729 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
747 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
730 (makebench(iterparts), b'bundle2 iterparts()'),
748 (makebench(iterparts), b'bundle2 iterparts()'),
731 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
749 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
732 (makebench(seek), b'bundle2 part seek()'),
750 (makebench(seek), b'bundle2 part seek()'),
733 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
751 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
734 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
752 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
735 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
753 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
736 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
754 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
737 ])
755 ])
738 elif isinstance(bundle, streamclone.streamcloneapplier):
756 elif isinstance(bundle, streamclone.streamcloneapplier):
739 raise error.Abort(b'stream clone bundles not supported')
757 raise error.Abort(b'stream clone bundles not supported')
740 else:
758 else:
741 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
759 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
742
760
743 for fn, title in benches:
761 for fn, title in benches:
744 timer, fm = gettimer(ui, opts)
762 timer, fm = gettimer(ui, opts)
745 timer(fn, title=title)
763 timer(fn, title=title)
746 fm.end()
764 fm.end()
747
765
748 @command(b'perfchangegroupchangelog', formatteropts +
766 @command(b'perfchangegroupchangelog', formatteropts +
749 [(b'', b'cgversion', b'02', b'changegroup version'),
767 [(b'', b'cgversion', b'02', b'changegroup version'),
750 (b'r', b'rev', b'', b'revisions to add to changegroup')])
768 (b'r', b'rev', b'', b'revisions to add to changegroup')])
751 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
769 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
752 """Benchmark producing a changelog group for a changegroup.
770 """Benchmark producing a changelog group for a changegroup.
753
771
754 This measures the time spent processing the changelog during a
772 This measures the time spent processing the changelog during a
755 bundle operation. This occurs during `hg bundle` and on a server
773 bundle operation. This occurs during `hg bundle` and on a server
756 processing a `getbundle` wire protocol request (handles clones
774 processing a `getbundle` wire protocol request (handles clones
757 and pull requests).
775 and pull requests).
758
776
759 By default, all revisions are added to the changegroup.
777 By default, all revisions are added to the changegroup.
760 """
778 """
761 opts = _byteskwargs(opts)
779 opts = _byteskwargs(opts)
762 cl = repo.changelog
780 cl = repo.changelog
763 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
781 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
764 bundler = changegroup.getbundler(cgversion, repo)
782 bundler = changegroup.getbundler(cgversion, repo)
765
783
766 def d():
784 def d():
767 state, chunks = bundler._generatechangelog(cl, nodes)
785 state, chunks = bundler._generatechangelog(cl, nodes)
768 for chunk in chunks:
786 for chunk in chunks:
769 pass
787 pass
770
788
771 timer, fm = gettimer(ui, opts)
789 timer, fm = gettimer(ui, opts)
772
790
773 # Terminal printing can interfere with timing. So disable it.
791 # Terminal printing can interfere with timing. So disable it.
774 with ui.configoverride({(b'progress', b'disable'): True}):
792 with ui.configoverride({(b'progress', b'disable'): True}):
775 timer(d)
793 timer(d)
776
794
777 fm.end()
795 fm.end()
778
796
779 @command(b'perfdirs', formatteropts)
797 @command(b'perfdirs', formatteropts)
780 def perfdirs(ui, repo, **opts):
798 def perfdirs(ui, repo, **opts):
781 opts = _byteskwargs(opts)
799 opts = _byteskwargs(opts)
782 timer, fm = gettimer(ui, opts)
800 timer, fm = gettimer(ui, opts)
783 dirstate = repo.dirstate
801 dirstate = repo.dirstate
784 b'a' in dirstate
802 b'a' in dirstate
785 def d():
803 def d():
786 dirstate.hasdir(b'a')
804 dirstate.hasdir(b'a')
787 del dirstate._map._dirs
805 del dirstate._map._dirs
788 timer(d)
806 timer(d)
789 fm.end()
807 fm.end()
790
808
791 @command(b'perfdirstate', formatteropts)
809 @command(b'perfdirstate', formatteropts)
792 def perfdirstate(ui, repo, **opts):
810 def perfdirstate(ui, repo, **opts):
793 opts = _byteskwargs(opts)
811 opts = _byteskwargs(opts)
794 timer, fm = gettimer(ui, opts)
812 timer, fm = gettimer(ui, opts)
795 b"a" in repo.dirstate
813 b"a" in repo.dirstate
796 def d():
814 def d():
797 repo.dirstate.invalidate()
815 repo.dirstate.invalidate()
798 b"a" in repo.dirstate
816 b"a" in repo.dirstate
799 timer(d)
817 timer(d)
800 fm.end()
818 fm.end()
801
819
802 @command(b'perfdirstatedirs', formatteropts)
820 @command(b'perfdirstatedirs', formatteropts)
803 def perfdirstatedirs(ui, repo, **opts):
821 def perfdirstatedirs(ui, repo, **opts):
804 opts = _byteskwargs(opts)
822 opts = _byteskwargs(opts)
805 timer, fm = gettimer(ui, opts)
823 timer, fm = gettimer(ui, opts)
806 b"a" in repo.dirstate
824 b"a" in repo.dirstate
807 def d():
825 def d():
808 repo.dirstate.hasdir(b"a")
826 repo.dirstate.hasdir(b"a")
809 del repo.dirstate._map._dirs
827 del repo.dirstate._map._dirs
810 timer(d)
828 timer(d)
811 fm.end()
829 fm.end()
812
830
813 @command(b'perfdirstatefoldmap', formatteropts)
831 @command(b'perfdirstatefoldmap', formatteropts)
814 def perfdirstatefoldmap(ui, repo, **opts):
832 def perfdirstatefoldmap(ui, repo, **opts):
815 opts = _byteskwargs(opts)
833 opts = _byteskwargs(opts)
816 timer, fm = gettimer(ui, opts)
834 timer, fm = gettimer(ui, opts)
817 dirstate = repo.dirstate
835 dirstate = repo.dirstate
818 b'a' in dirstate
836 b'a' in dirstate
819 def d():
837 def d():
820 dirstate._map.filefoldmap.get(b'a')
838 dirstate._map.filefoldmap.get(b'a')
821 del dirstate._map.filefoldmap
839 del dirstate._map.filefoldmap
822 timer(d)
840 timer(d)
823 fm.end()
841 fm.end()
824
842
825 @command(b'perfdirfoldmap', formatteropts)
843 @command(b'perfdirfoldmap', formatteropts)
826 def perfdirfoldmap(ui, repo, **opts):
844 def perfdirfoldmap(ui, repo, **opts):
827 opts = _byteskwargs(opts)
845 opts = _byteskwargs(opts)
828 timer, fm = gettimer(ui, opts)
846 timer, fm = gettimer(ui, opts)
829 dirstate = repo.dirstate
847 dirstate = repo.dirstate
830 b'a' in dirstate
848 b'a' in dirstate
831 def d():
849 def d():
832 dirstate._map.dirfoldmap.get(b'a')
850 dirstate._map.dirfoldmap.get(b'a')
833 del dirstate._map.dirfoldmap
851 del dirstate._map.dirfoldmap
834 del dirstate._map._dirs
852 del dirstate._map._dirs
835 timer(d)
853 timer(d)
836 fm.end()
854 fm.end()
837
855
838 @command(b'perfdirstatewrite', formatteropts)
856 @command(b'perfdirstatewrite', formatteropts)
839 def perfdirstatewrite(ui, repo, **opts):
857 def perfdirstatewrite(ui, repo, **opts):
840 opts = _byteskwargs(opts)
858 opts = _byteskwargs(opts)
841 timer, fm = gettimer(ui, opts)
859 timer, fm = gettimer(ui, opts)
842 ds = repo.dirstate
860 ds = repo.dirstate
843 b"a" in ds
861 b"a" in ds
844 def d():
862 def d():
845 ds._dirty = True
863 ds._dirty = True
846 ds.write(repo.currenttransaction())
864 ds.write(repo.currenttransaction())
847 timer(d)
865 timer(d)
848 fm.end()
866 fm.end()
849
867
850 @command(b'perfmergecalculate',
868 @command(b'perfmergecalculate',
851 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
869 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
852 def perfmergecalculate(ui, repo, rev, **opts):
870 def perfmergecalculate(ui, repo, rev, **opts):
853 opts = _byteskwargs(opts)
871 opts = _byteskwargs(opts)
854 timer, fm = gettimer(ui, opts)
872 timer, fm = gettimer(ui, opts)
855 wctx = repo[None]
873 wctx = repo[None]
856 rctx = scmutil.revsingle(repo, rev, rev)
874 rctx = scmutil.revsingle(repo, rev, rev)
857 ancestor = wctx.ancestor(rctx)
875 ancestor = wctx.ancestor(rctx)
858 # we don't want working dir files to be stat'd in the benchmark, so prime
876 # we don't want working dir files to be stat'd in the benchmark, so prime
859 # that cache
877 # that cache
860 wctx.dirty()
878 wctx.dirty()
861 def d():
879 def d():
862 # acceptremote is True because we don't want prompts in the middle of
880 # acceptremote is True because we don't want prompts in the middle of
863 # our benchmark
881 # our benchmark
864 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
882 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
865 acceptremote=True, followcopies=True)
883 acceptremote=True, followcopies=True)
866 timer(d)
884 timer(d)
867 fm.end()
885 fm.end()
868
886
869 @command(b'perfpathcopies', [], b"REV REV")
887 @command(b'perfpathcopies', [], b"REV REV")
870 def perfpathcopies(ui, repo, rev1, rev2, **opts):
888 def perfpathcopies(ui, repo, rev1, rev2, **opts):
871 """benchmark the copy tracing logic"""
889 """benchmark the copy tracing logic"""
872 opts = _byteskwargs(opts)
890 opts = _byteskwargs(opts)
873 timer, fm = gettimer(ui, opts)
891 timer, fm = gettimer(ui, opts)
874 ctx1 = scmutil.revsingle(repo, rev1, rev1)
892 ctx1 = scmutil.revsingle(repo, rev1, rev1)
875 ctx2 = scmutil.revsingle(repo, rev2, rev2)
893 ctx2 = scmutil.revsingle(repo, rev2, rev2)
876 def d():
894 def d():
877 copies.pathcopies(ctx1, ctx2)
895 copies.pathcopies(ctx1, ctx2)
878 timer(d)
896 timer(d)
879 fm.end()
897 fm.end()
880
898
881 @command(b'perfphases',
899 @command(b'perfphases',
882 [(b'', b'full', False, b'include file reading time too'),
900 [(b'', b'full', False, b'include file reading time too'),
883 ], b"")
901 ], b"")
884 def perfphases(ui, repo, **opts):
902 def perfphases(ui, repo, **opts):
885 """benchmark phasesets computation"""
903 """benchmark phasesets computation"""
886 opts = _byteskwargs(opts)
904 opts = _byteskwargs(opts)
887 timer, fm = gettimer(ui, opts)
905 timer, fm = gettimer(ui, opts)
888 _phases = repo._phasecache
906 _phases = repo._phasecache
889 full = opts.get(b'full')
907 full = opts.get(b'full')
890 def d():
908 def d():
891 phases = _phases
909 phases = _phases
892 if full:
910 if full:
893 clearfilecache(repo, b'_phasecache')
911 clearfilecache(repo, b'_phasecache')
894 phases = repo._phasecache
912 phases = repo._phasecache
895 phases.invalidate()
913 phases.invalidate()
896 phases.loadphaserevs(repo)
914 phases.loadphaserevs(repo)
897 timer(d)
915 timer(d)
898 fm.end()
916 fm.end()
899
917
900 @command(b'perfphasesremote',
918 @command(b'perfphasesremote',
901 [], b"[DEST]")
919 [], b"[DEST]")
902 def perfphasesremote(ui, repo, dest=None, **opts):
920 def perfphasesremote(ui, repo, dest=None, **opts):
903 """benchmark time needed to analyse phases of the remote server"""
921 """benchmark time needed to analyse phases of the remote server"""
904 from mercurial.node import (
922 from mercurial.node import (
905 bin,
923 bin,
906 )
924 )
907 from mercurial import (
925 from mercurial import (
908 exchange,
926 exchange,
909 hg,
927 hg,
910 phases,
928 phases,
911 )
929 )
912 opts = _byteskwargs(opts)
930 opts = _byteskwargs(opts)
913 timer, fm = gettimer(ui, opts)
931 timer, fm = gettimer(ui, opts)
914
932
915 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
933 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
916 if not path:
934 if not path:
917 raise error.Abort((b'default repository not configured!'),
935 raise error.Abort((b'default repository not configured!'),
918 hint=(b"see 'hg help config.paths'"))
936 hint=(b"see 'hg help config.paths'"))
919 dest = path.pushloc or path.loc
937 dest = path.pushloc or path.loc
920 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
938 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
921 other = hg.peer(repo, opts, dest)
939 other = hg.peer(repo, opts, dest)
922
940
923 # easier to perform discovery through the operation
941 # easier to perform discovery through the operation
924 op = exchange.pushoperation(repo, other)
942 op = exchange.pushoperation(repo, other)
925 exchange._pushdiscoverychangeset(op)
943 exchange._pushdiscoverychangeset(op)
926
944
927 remotesubset = op.fallbackheads
945 remotesubset = op.fallbackheads
928
946
929 with other.commandexecutor() as e:
947 with other.commandexecutor() as e:
930 remotephases = e.callcommand(b'listkeys',
948 remotephases = e.callcommand(b'listkeys',
931 {b'namespace': b'phases'}).result()
949 {b'namespace': b'phases'}).result()
932 del other
950 del other
933 publishing = remotephases.get(b'publishing', False)
951 publishing = remotephases.get(b'publishing', False)
934 if publishing:
952 if publishing:
935 ui.status((b'publishing: yes\n'))
953 ui.status((b'publishing: yes\n'))
936 else:
954 else:
937 ui.status((b'publishing: no\n'))
955 ui.status((b'publishing: no\n'))
938
956
939 nodemap = repo.changelog.nodemap
957 nodemap = repo.changelog.nodemap
940 nonpublishroots = 0
958 nonpublishroots = 0
941 for nhex, phase in remotephases.iteritems():
959 for nhex, phase in remotephases.iteritems():
942 if nhex == b'publishing': # ignore data related to publish option
960 if nhex == b'publishing': # ignore data related to publish option
943 continue
961 continue
944 node = bin(nhex)
962 node = bin(nhex)
945 if node in nodemap and int(phase):
963 if node in nodemap and int(phase):
946 nonpublishroots += 1
964 nonpublishroots += 1
947 ui.status((b'number of roots: %d\n') % len(remotephases))
965 ui.status((b'number of roots: %d\n') % len(remotephases))
948 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
966 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
949 def d():
967 def d():
950 phases.remotephasessummary(repo,
968 phases.remotephasessummary(repo,
951 remotesubset,
969 remotesubset,
952 remotephases)
970 remotephases)
953 timer(d)
971 timer(d)
954 fm.end()
972 fm.end()
955
973
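# Illustrative invocations of perfphasesremote (a sketch, not canonical usage;
# `my-mirror` is a hypothetical path alias assumed to exist in [paths]):
#
#   $ hg perfphasesremote                # resolves default-push, then default
#   $ hg perfphasesremote my-mirror
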
956 @command(b'perfmanifest',[
974 @command(b'perfmanifest',[
957 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
975 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
958 (b'', b'clear-disk', False, b'clear on-disk caches too'),
976 (b'', b'clear-disk', False, b'clear on-disk caches too'),
959 ] + formatteropts, b'REV|NODE')
977 ] + formatteropts, b'REV|NODE')
960 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
978 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
961 """benchmark the time to read a manifest from disk and return a usable
979 """benchmark the time to read a manifest from disk and return a usable
962 dict-like object
980 dict-like object
963
981
964 Manifest caches are cleared before retrieval."""
982 Manifest caches are cleared before retrieval."""
965 opts = _byteskwargs(opts)
983 opts = _byteskwargs(opts)
966 timer, fm = gettimer(ui, opts)
984 timer, fm = gettimer(ui, opts)
967 if not manifest_rev:
985 if not manifest_rev:
968 ctx = scmutil.revsingle(repo, rev, rev)
986 ctx = scmutil.revsingle(repo, rev, rev)
969 t = ctx.manifestnode()
987 t = ctx.manifestnode()
970 else:
988 else:
971 from mercurial.node import bin
989 from mercurial.node import bin
972
990
973 if len(rev) == 40:
991 if len(rev) == 40:
974 t = bin(rev)
992 t = bin(rev)
975 else:
993 else:
976 try:
994 try:
977 rev = int(rev)
995 rev = int(rev)
978
996
979 if util.safehasattr(repo.manifestlog, b'getstorage'):
997 if util.safehasattr(repo.manifestlog, b'getstorage'):
980 t = repo.manifestlog.getstorage(b'').node(rev)
998 t = repo.manifestlog.getstorage(b'').node(rev)
981 else:
999 else:
982 t = repo.manifestlog._revlog.lookup(rev)
1000 t = repo.manifestlog._revlog.lookup(rev)
983 except ValueError:
1001 except ValueError:
984 raise error.Abort(b'manifest revision must be integer or full '
1002 raise error.Abort(b'manifest revision must be integer or full '
985 b'node')
1003 b'node')
986 def d():
1004 def d():
987 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1005 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
988 repo.manifestlog[t].read()
1006 repo.manifestlog[t].read()
989 timer(d)
1007 timer(d)
990 fm.end()
1008 fm.end()
991
1009
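# Illustrative invocations of perfmanifest (a sketch; flags as defined above):
#
#   $ hg perfmanifest tip                # manifest reached through changeset tip
#   $ hg perfmanifest --clear-disk tip   # also drop on-disk manifest caches
#   $ hg perfmanifest -m 0               # interpret 0 as a manifest revision
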
992 @command(b'perfchangeset', formatteropts)
1010 @command(b'perfchangeset', formatteropts)
993 def perfchangeset(ui, repo, rev, **opts):
1011 def perfchangeset(ui, repo, rev, **opts):
994 opts = _byteskwargs(opts)
1012 opts = _byteskwargs(opts)
995 timer, fm = gettimer(ui, opts)
1013 timer, fm = gettimer(ui, opts)
996 n = scmutil.revsingle(repo, rev).node()
1014 n = scmutil.revsingle(repo, rev).node()
997 def d():
1015 def d():
998 repo.changelog.read(n)
1016 repo.changelog.read(n)
999 #repo.changelog._cache = None
1017 #repo.changelog._cache = None
1000 timer(d)
1018 timer(d)
1001 fm.end()
1019 fm.end()
1002
1020
1003 @command(b'perfignore', formatteropts)
1021 @command(b'perfignore', formatteropts)
1004 def perfignore(ui, repo, **opts):
1022 def perfignore(ui, repo, **opts):
1005 """benchmark operation related to computing ignore"""
1023 """benchmark operation related to computing ignore"""
1006 opts = _byteskwargs(opts)
1024 opts = _byteskwargs(opts)
1007 timer, fm = gettimer(ui, opts)
1025 timer, fm = gettimer(ui, opts)
1008 dirstate = repo.dirstate
1026 dirstate = repo.dirstate
1009
1027
1010 def setupone():
1028 def setupone():
1011 dirstate.invalidate()
1029 dirstate.invalidate()
1012 clearfilecache(dirstate, b'_ignore')
1030 clearfilecache(dirstate, b'_ignore')
1013
1031
1014 def runone():
1032 def runone():
1015 dirstate._ignore
1033 dirstate._ignore
1016
1034
1017 timer(runone, setup=setupone, title=b"load")
1035 timer(runone, setup=setupone, title=b"load")
1018 fm.end()
1036 fm.end()
1019
1037
1020 @command(b'perfindex', [
1038 @command(b'perfindex', [
1021 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1039 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1022 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1040 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1023 ] + formatteropts)
1041 ] + formatteropts)
1024 def perfindex(ui, repo, **opts):
1042 def perfindex(ui, repo, **opts):
1025 """benchmark index creation time followed by a lookup
1043 """benchmark index creation time followed by a lookup
1026
1044
1027 The default is to look `tip` up. Depending on the index implementation,
1045 The default is to look `tip` up. Depending on the index implementation,
1028 the revision looked up can matter. For example, an implementation
1046 the revision looked up can matter. For example, an implementation
1029 scanning the index will have a faster lookup time for `--rev tip` than for
1047 scanning the index will have a faster lookup time for `--rev tip` than for
1030 `--rev 0`. The number of looked up revisions and their order can also
1048 `--rev 0`. The number of looked up revisions and their order can also
1031 matter.
1049 matter.
1032
1050
1033 Examples of useful sets to test:
1051 Examples of useful sets to test:
1034 * tip
1052 * tip
1035 * 0
1053 * 0
1036 * -10:
1054 * -10:
1037 * :10
1055 * :10
1038 * -10: + :10
1056 * -10: + :10
1039 * :10: + -10:
1057 * :10: + -10:
1040 * -10000:
1058 * -10000:
1041 * -10000: + 0
1059 * -10000: + 0
1042
1060
1043 It is not currently possible to check for lookup of a missing node. For
1061 It is not currently possible to check for lookup of a missing node. For
1044 deeper lookup benchmarking, check out the `perfnodemap` command."""
1062 deeper lookup benchmarking, check out the `perfnodemap` command."""
1045 import mercurial.revlog
1063 import mercurial.revlog
1046 opts = _byteskwargs(opts)
1064 opts = _byteskwargs(opts)
1047 timer, fm = gettimer(ui, opts)
1065 timer, fm = gettimer(ui, opts)
1048 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1066 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1049 if opts[b'no_lookup']:
1067 if opts[b'no_lookup']:
1050 if opts['rev']:
1068 if opts['rev']:
1051 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1069 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1052 nodes = []
1070 nodes = []
1053 elif not opts[b'rev']:
1071 elif not opts[b'rev']:
1054 nodes = [repo[b"tip"].node()]
1072 nodes = [repo[b"tip"].node()]
1055 else:
1073 else:
1056 revs = scmutil.revrange(repo, opts[b'rev'])
1074 revs = scmutil.revrange(repo, opts[b'rev'])
1057 cl = repo.changelog
1075 cl = repo.changelog
1058 nodes = [cl.node(r) for r in revs]
1076 nodes = [cl.node(r) for r in revs]
1059
1077
1060 unfi = repo.unfiltered()
1078 unfi = repo.unfiltered()
1061 # find the filecache func directly
1079 # find the filecache func directly
1062 # This avoids polluting the benchmark with the filecache logic
1080 # This avoids polluting the benchmark with the filecache logic
1063 makecl = unfi.__class__.changelog.func
1081 makecl = unfi.__class__.changelog.func
1064 def setup():
1082 def setup():
1065 # probably not necessary, but for good measure
1083 # probably not necessary, but for good measure
1066 clearchangelog(unfi)
1084 clearchangelog(unfi)
1067 def d():
1085 def d():
1068 cl = makecl(unfi)
1086 cl = makecl(unfi)
1069 for n in nodes:
1087 for n in nodes:
1070 cl.rev(n)
1088 cl.rev(n)
1071 timer(d, setup=setup)
1089 timer(d, setup=setup)
1072 fm.end()
1090 fm.end()
1073
1091
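# Illustrative invocations of perfindex, covering a few of the revision sets
# suggested in the docstring above (a sketch; timings depend on the repository):
#
#   $ hg perfindex                          # defaults to looking up tip
#   $ hg perfindex --rev 0
#   $ hg perfindex --rev '-10000:' --rev 0
#   $ hg perfindex --no-lookup              # index creation time only
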
1074 @command(b'perfnodemap', [
1092 @command(b'perfnodemap', [
1075 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1093 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1076 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1094 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1077 ] + formatteropts)
1095 ] + formatteropts)
1078 def perfnodemap(ui, repo, **opts):
1096 def perfnodemap(ui, repo, **opts):
1079 """benchmark the time necessary to look up revision from a cold nodemap
1097 """benchmark the time necessary to look up revision from a cold nodemap
1080
1098
1081 Depending on the implementation, the number and order of revisions we look
1099 Depending on the implementation, the number and order of revisions we look
1082 up can vary. Examples of useful sets to test:
1100 up can vary. Examples of useful sets to test:
1083 * tip
1101 * tip
1084 * 0
1102 * 0
1085 * -10:
1103 * -10:
1086 * :10
1104 * :10
1087 * -10: + :10
1105 * -10: + :10
1088 * :10: + -10:
1106 * :10: + -10:
1089 * -10000:
1107 * -10000:
1090 * -10000: + 0
1108 * -10000: + 0
1091
1109
1092 The command currently focuses on valid binary lookup. Benchmarking for
1110 The command currently focuses on valid binary lookup. Benchmarking for
1093 hexlookup, prefix lookup and missing lookup would also be valuable.
1111 hexlookup, prefix lookup and missing lookup would also be valuable.
1094 """
1112 """
1095 import mercurial.revlog
1113 import mercurial.revlog
1096 opts = _byteskwargs(opts)
1114 opts = _byteskwargs(opts)
1097 timer, fm = gettimer(ui, opts)
1115 timer, fm = gettimer(ui, opts)
1098 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1116 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1099
1117
1100 unfi = repo.unfiltered()
1118 unfi = repo.unfiltered()
1101 clearcaches = opts['clear_caches']
1119 clearcaches = opts['clear_caches']
1102 # find the filecache func directly
1120 # find the filecache func directly
1103 # This avoids polluting the benchmark with the filecache logic
1121 # This avoids polluting the benchmark with the filecache logic
1104 makecl = unfi.__class__.changelog.func
1122 makecl = unfi.__class__.changelog.func
1105 if not opts[b'rev']:
1123 if not opts[b'rev']:
1106 raise error.Abort('use --rev to specify revisions to look up')
1124 raise error.Abort('use --rev to specify revisions to look up')
1107 revs = scmutil.revrange(repo, opts[b'rev'])
1125 revs = scmutil.revrange(repo, opts[b'rev'])
1108 cl = repo.changelog
1126 cl = repo.changelog
1109 nodes = [cl.node(r) for r in revs]
1127 nodes = [cl.node(r) for r in revs]
1110
1128
1111 # use a list to pass a reference to a nodemap from one closure to the next
1129 # use a list to pass a reference to a nodemap from one closure to the next
1112 nodeget = [None]
1130 nodeget = [None]
1113 def setnodeget():
1131 def setnodeget():
1114 # probably not necessary, but for good measure
1132 # probably not necessary, but for good measure
1115 clearchangelog(unfi)
1133 clearchangelog(unfi)
1116 nodeget[0] = makecl(unfi).nodemap.get
1134 nodeget[0] = makecl(unfi).nodemap.get
1117
1135
1118 def d():
1136 def d():
1119 get = nodeget[0]
1137 get = nodeget[0]
1120 for n in nodes:
1138 for n in nodes:
1121 get(n)
1139 get(n)
1122
1140
1123 setup = None
1141 setup = None
1124 if clearcaches:
1142 if clearcaches:
1125 def setup():
1143 def setup():
1126 setnodeget()
1144 setnodeget()
1127 else:
1145 else:
1128 setnodeget()
1146 setnodeget()
1129 d() # prewarm the data structure
1147 d() # prewarm the data structure
1130 timer(d, setup=setup)
1148 timer(d, setup=setup)
1131 fm.end()
1149 fm.end()
1132
1150
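# The `nodeget = [None]` pattern above uses a one-element list as a mutable
# cell so that setup() can hand a freshly created `nodemap.get` to d() without
# relying on `nonlocal` (unavailable in Python 2). A minimal, self-contained
# sketch of the same idiom (illustrative only, no Mercurial objects involved):
#
#     cell = [None]
#     def fill():
#         cell[0] = {b'key': 42}.get   # stands in for makecl(unfi).nodemap.get
#     def use():
#         get = cell[0]
#         return get(b'key')
#     fill()
#     assert use() == 42
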
1133 @command(b'perfstartup', formatteropts)
1151 @command(b'perfstartup', formatteropts)
1134 def perfstartup(ui, repo, **opts):
1152 def perfstartup(ui, repo, **opts):
1135 opts = _byteskwargs(opts)
1153 opts = _byteskwargs(opts)
1136 timer, fm = gettimer(ui, opts)
1154 timer, fm = gettimer(ui, opts)
1137 def d():
1155 def d():
1138 if os.name != r'nt':
1156 if os.name != r'nt':
1139 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1157 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1140 fsencode(sys.argv[0]))
1158 fsencode(sys.argv[0]))
1141 else:
1159 else:
1142 os.environ[r'HGRCPATH'] = r' '
1160 os.environ[r'HGRCPATH'] = r' '
1143 os.system(r"%s version -q > NUL" % sys.argv[0])
1161 os.system(r"%s version -q > NUL" % sys.argv[0])
1144 timer(d)
1162 timer(d)
1145 fm.end()
1163 fm.end()
1146
1164
1147 @command(b'perfparents', formatteropts)
1165 @command(b'perfparents', formatteropts)
1148 def perfparents(ui, repo, **opts):
1166 def perfparents(ui, repo, **opts):
1149 opts = _byteskwargs(opts)
1167 opts = _byteskwargs(opts)
1150 timer, fm = gettimer(ui, opts)
1168 timer, fm = gettimer(ui, opts)
1151 # control the number of commits perfparents iterates over
1169 # control the number of commits perfparents iterates over
1152 # experimental config: perf.parentscount
1170 # experimental config: perf.parentscount
1153 count = getint(ui, b"perf", b"parentscount", 1000)
1171 count = getint(ui, b"perf", b"parentscount", 1000)
1154 if len(repo.changelog) < count:
1172 if len(repo.changelog) < count:
1155 raise error.Abort(b"repo needs %d commits for this test" % count)
1173 raise error.Abort(b"repo needs %d commits for this test" % count)
1156 repo = repo.unfiltered()
1174 repo = repo.unfiltered()
1157 nl = [repo.changelog.node(i) for i in _xrange(count)]
1175 nl = [repo.changelog.node(i) for i in _xrange(count)]
1158 def d():
1176 def d():
1159 for n in nl:
1177 for n in nl:
1160 repo.changelog.parents(n)
1178 repo.changelog.parents(n)
1161 timer(d)
1179 timer(d)
1162 fm.end()
1180 fm.end()
1163
1181
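# The number of commits visited defaults to 1000 and can be tuned through the
# experimental option read above, e.g. with an hgrc snippet like this
# (illustrative):
#
#   [perf]
#   parentscount = 250
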
1164 @command(b'perfctxfiles', formatteropts)
1182 @command(b'perfctxfiles', formatteropts)
1165 def perfctxfiles(ui, repo, x, **opts):
1183 def perfctxfiles(ui, repo, x, **opts):
1166 opts = _byteskwargs(opts)
1184 opts = _byteskwargs(opts)
1167 x = int(x)
1185 x = int(x)
1168 timer, fm = gettimer(ui, opts)
1186 timer, fm = gettimer(ui, opts)
1169 def d():
1187 def d():
1170 len(repo[x].files())
1188 len(repo[x].files())
1171 timer(d)
1189 timer(d)
1172 fm.end()
1190 fm.end()
1173
1191
1174 @command(b'perfrawfiles', formatteropts)
1192 @command(b'perfrawfiles', formatteropts)
1175 def perfrawfiles(ui, repo, x, **opts):
1193 def perfrawfiles(ui, repo, x, **opts):
1176 opts = _byteskwargs(opts)
1194 opts = _byteskwargs(opts)
1177 x = int(x)
1195 x = int(x)
1178 timer, fm = gettimer(ui, opts)
1196 timer, fm = gettimer(ui, opts)
1179 cl = repo.changelog
1197 cl = repo.changelog
1180 def d():
1198 def d():
1181 len(cl.read(x)[3])
1199 len(cl.read(x)[3])
1182 timer(d)
1200 timer(d)
1183 fm.end()
1201 fm.end()
1184
1202
1185 @command(b'perflookup', formatteropts)
1203 @command(b'perflookup', formatteropts)
1186 def perflookup(ui, repo, rev, **opts):
1204 def perflookup(ui, repo, rev, **opts):
1187 opts = _byteskwargs(opts)
1205 opts = _byteskwargs(opts)
1188 timer, fm = gettimer(ui, opts)
1206 timer, fm = gettimer(ui, opts)
1189 timer(lambda: len(repo.lookup(rev)))
1207 timer(lambda: len(repo.lookup(rev)))
1190 fm.end()
1208 fm.end()
1191
1209
1192 @command(b'perflinelogedits',
1210 @command(b'perflinelogedits',
1193 [(b'n', b'edits', 10000, b'number of edits'),
1211 [(b'n', b'edits', 10000, b'number of edits'),
1194 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1212 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1195 ], norepo=True)
1213 ], norepo=True)
1196 def perflinelogedits(ui, **opts):
1214 def perflinelogedits(ui, **opts):
1197 from mercurial import linelog
1215 from mercurial import linelog
1198
1216
1199 opts = _byteskwargs(opts)
1217 opts = _byteskwargs(opts)
1200
1218
1201 edits = opts[b'edits']
1219 edits = opts[b'edits']
1202 maxhunklines = opts[b'max_hunk_lines']
1220 maxhunklines = opts[b'max_hunk_lines']
1203
1221
1204 maxb1 = 100000
1222 maxb1 = 100000
1205 random.seed(0)
1223 random.seed(0)
1206 randint = random.randint
1224 randint = random.randint
1207 currentlines = 0
1225 currentlines = 0
1208 arglist = []
1226 arglist = []
1209 for rev in _xrange(edits):
1227 for rev in _xrange(edits):
1210 a1 = randint(0, currentlines)
1228 a1 = randint(0, currentlines)
1211 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1229 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1212 b1 = randint(0, maxb1)
1230 b1 = randint(0, maxb1)
1213 b2 = randint(b1, b1 + maxhunklines)
1231 b2 = randint(b1, b1 + maxhunklines)
1214 currentlines += (b2 - b1) - (a2 - a1)
1232 currentlines += (b2 - b1) - (a2 - a1)
1215 arglist.append((rev, a1, a2, b1, b2))
1233 arglist.append((rev, a1, a2, b1, b2))
1216
1234
1217 def d():
1235 def d():
1218 ll = linelog.linelog()
1236 ll = linelog.linelog()
1219 for args in arglist:
1237 for args in arglist:
1220 ll.replacelines(*args)
1238 ll.replacelines(*args)
1221
1239
1222 timer, fm = gettimer(ui, opts)
1240 timer, fm = gettimer(ui, opts)
1223 timer(d)
1241 timer(d)
1224 fm.end()
1242 fm.end()
1225
1243
1226 @command(b'perfrevrange', formatteropts)
1244 @command(b'perfrevrange', formatteropts)
1227 def perfrevrange(ui, repo, *specs, **opts):
1245 def perfrevrange(ui, repo, *specs, **opts):
1228 opts = _byteskwargs(opts)
1246 opts = _byteskwargs(opts)
1229 timer, fm = gettimer(ui, opts)
1247 timer, fm = gettimer(ui, opts)
1230 revrange = scmutil.revrange
1248 revrange = scmutil.revrange
1231 timer(lambda: len(revrange(repo, specs)))
1249 timer(lambda: len(revrange(repo, specs)))
1232 fm.end()
1250 fm.end()
1233
1251
1234 @command(b'perfnodelookup', formatteropts)
1252 @command(b'perfnodelookup', formatteropts)
1235 def perfnodelookup(ui, repo, rev, **opts):
1253 def perfnodelookup(ui, repo, rev, **opts):
1236 opts = _byteskwargs(opts)
1254 opts = _byteskwargs(opts)
1237 timer, fm = gettimer(ui, opts)
1255 timer, fm = gettimer(ui, opts)
1238 import mercurial.revlog
1256 import mercurial.revlog
1239 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1257 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1240 n = scmutil.revsingle(repo, rev).node()
1258 n = scmutil.revsingle(repo, rev).node()
1241 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1259 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1242 def d():
1260 def d():
1243 cl.rev(n)
1261 cl.rev(n)
1244 clearcaches(cl)
1262 clearcaches(cl)
1245 timer(d)
1263 timer(d)
1246 fm.end()
1264 fm.end()
1247
1265
1248 @command(b'perflog',
1266 @command(b'perflog',
1249 [(b'', b'rename', False, b'ask log to follow renames')
1267 [(b'', b'rename', False, b'ask log to follow renames')
1250 ] + formatteropts)
1268 ] + formatteropts)
1251 def perflog(ui, repo, rev=None, **opts):
1269 def perflog(ui, repo, rev=None, **opts):
1252 opts = _byteskwargs(opts)
1270 opts = _byteskwargs(opts)
1253 if rev is None:
1271 if rev is None:
1254 rev=[]
1272 rev=[]
1255 timer, fm = gettimer(ui, opts)
1273 timer, fm = gettimer(ui, opts)
1256 ui.pushbuffer()
1274 ui.pushbuffer()
1257 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1275 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1258 copies=opts.get(b'rename')))
1276 copies=opts.get(b'rename')))
1259 ui.popbuffer()
1277 ui.popbuffer()
1260 fm.end()
1278 fm.end()
1261
1279
1262 @command(b'perfmoonwalk', formatteropts)
1280 @command(b'perfmoonwalk', formatteropts)
1263 def perfmoonwalk(ui, repo, **opts):
1281 def perfmoonwalk(ui, repo, **opts):
1264 """benchmark walking the changelog backwards
1282 """benchmark walking the changelog backwards
1265
1283
1266 This also loads the changelog data for each revision in the changelog.
1284 This also loads the changelog data for each revision in the changelog.
1267 """
1285 """
1268 opts = _byteskwargs(opts)
1286 opts = _byteskwargs(opts)
1269 timer, fm = gettimer(ui, opts)
1287 timer, fm = gettimer(ui, opts)
1270 def moonwalk():
1288 def moonwalk():
1271 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1289 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1272 ctx = repo[i]
1290 ctx = repo[i]
1273 ctx.branch() # read changelog data (in addition to the index)
1291 ctx.branch() # read changelog data (in addition to the index)
1274 timer(moonwalk)
1292 timer(moonwalk)
1275 fm.end()
1293 fm.end()
1276
1294
1277 @command(b'perftemplating',
1295 @command(b'perftemplating',
1278 [(b'r', b'rev', [], b'revisions to run the template on'),
1296 [(b'r', b'rev', [], b'revisions to run the template on'),
1279 ] + formatteropts)
1297 ] + formatteropts)
1280 def perftemplating(ui, repo, testedtemplate=None, **opts):
1298 def perftemplating(ui, repo, testedtemplate=None, **opts):
1281 """test the rendering time of a given template"""
1299 """test the rendering time of a given template"""
1282 if makelogtemplater is None:
1300 if makelogtemplater is None:
1283 raise error.Abort((b"perftemplating not available with this Mercurial"),
1301 raise error.Abort((b"perftemplating not available with this Mercurial"),
1284 hint=b"use 4.3 or later")
1302 hint=b"use 4.3 or later")
1285
1303
1286 opts = _byteskwargs(opts)
1304 opts = _byteskwargs(opts)
1287
1305
1288 nullui = ui.copy()
1306 nullui = ui.copy()
1289 nullui.fout = open(os.devnull, r'wb')
1307 nullui.fout = open(os.devnull, r'wb')
1290 nullui.disablepager()
1308 nullui.disablepager()
1291 revs = opts.get(b'rev')
1309 revs = opts.get(b'rev')
1292 if not revs:
1310 if not revs:
1293 revs = [b'all()']
1311 revs = [b'all()']
1294 revs = list(scmutil.revrange(repo, revs))
1312 revs = list(scmutil.revrange(repo, revs))
1295
1313
1296 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1314 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1297 b' {author|person}: {desc|firstline}\n')
1315 b' {author|person}: {desc|firstline}\n')
1298 if testedtemplate is None:
1316 if testedtemplate is None:
1299 testedtemplate = defaulttemplate
1317 testedtemplate = defaulttemplate
1300 displayer = makelogtemplater(nullui, repo, testedtemplate)
1318 displayer = makelogtemplater(nullui, repo, testedtemplate)
1301 def format():
1319 def format():
1302 for r in revs:
1320 for r in revs:
1303 ctx = repo[r]
1321 ctx = repo[r]
1304 displayer.show(ctx)
1322 displayer.show(ctx)
1305 displayer.flush(ctx)
1323 displayer.flush(ctx)
1306
1324
1307 timer, fm = gettimer(ui, opts)
1325 timer, fm = gettimer(ui, opts)
1308 timer(format)
1326 timer(format)
1309 fm.end()
1327 fm.end()
1310
1328
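# Illustrative invocations of perftemplating (a sketch; the template keywords
# match the default template defined above):
#
#   $ hg perftemplating -r 'last(all(), 1000)'
#   $ hg perftemplating -r tip '{rev}:{node|short} {desc|firstline}\n'
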
1311 @command(b'perfhelper-pathcopies', formatteropts +
1329 @command(b'perfhelper-pathcopies', formatteropts +
1312 [
1330 [
1313 (b'r', b'revs', [], b'restrict search to these revisions'),
1331 (b'r', b'revs', [], b'restrict search to these revisions'),
1314 (b'', b'timing', False, b'provides extra data (costly)'),
1332 (b'', b'timing', False, b'provides extra data (costly)'),
1315 ])
1333 ])
1316 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1334 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1317 """find statistic about potential parameters for the `perftracecopies`
1335 """find statistic about potential parameters for the `perftracecopies`
1318
1336
1319 This command finds source-destination pairs relevant for copy tracing testing.
1337 This command finds source-destination pairs relevant for copy tracing testing.
1320 It reports values for some of the parameters that impact copy tracing time.
1338 It reports values for some of the parameters that impact copy tracing time.
1321
1339
1322 If `--timing` is set, rename detection is run and the associated timing
1340 If `--timing` is set, rename detection is run and the associated timing
1323 will be reported. The extra details come at the cost of a slower command
1341 will be reported. The extra details come at the cost of a slower command
1324 execution.
1342 execution.
1325
1343
1326 Since the rename detection is only run once, other factors might easily
1344 Since the rename detection is only run once, other factors might easily
1327 affect the precision of the timing. However, it should give a good
1345 affect the precision of the timing. However, it should give a good
1328 approximation of which revision pairs are very costly.
1346 approximation of which revision pairs are very costly.
1329 """
1347 """
1330 opts = _byteskwargs(opts)
1348 opts = _byteskwargs(opts)
1331 fm = ui.formatter(b'perf', opts)
1349 fm = ui.formatter(b'perf', opts)
1332 dotiming = opts[b'timing']
1350 dotiming = opts[b'timing']
1333
1351
1334 if dotiming:
1352 if dotiming:
1335 header = '%12s %12s %12s %12s %12s %12s\n'
1353 header = '%12s %12s %12s %12s %12s %12s\n'
1336 output = ("%(source)12s %(destination)12s "
1354 output = ("%(source)12s %(destination)12s "
1337 "%(nbrevs)12d %(nbmissingfiles)12d "
1355 "%(nbrevs)12d %(nbmissingfiles)12d "
1338 "%(nbrenamedfiles)12d %(time)18.5f\n")
1356 "%(nbrenamedfiles)12d %(time)18.5f\n")
1339 header_names = ("source", "destination", "nb-revs", "nb-files",
1357 header_names = ("source", "destination", "nb-revs", "nb-files",
1340 "nb-renames", "time")
1358 "nb-renames", "time")
1341 fm.plain(header % header_names)
1359 fm.plain(header % header_names)
1342 else:
1360 else:
1343 header = '%12s %12s %12s %12s\n'
1361 header = '%12s %12s %12s %12s\n'
1344 output = ("%(source)12s %(destination)12s "
1362 output = ("%(source)12s %(destination)12s "
1345 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1363 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1346 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1364 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1347
1365
1348 if not revs:
1366 if not revs:
1349 revs = ['all()']
1367 revs = ['all()']
1350 revs = scmutil.revrange(repo, revs)
1368 revs = scmutil.revrange(repo, revs)
1351
1369
1352 roi = repo.revs('merge() and %ld', revs)
1370 roi = repo.revs('merge() and %ld', revs)
1353 for r in roi:
1371 for r in roi:
1354 ctx = repo[r]
1372 ctx = repo[r]
1355 p1 = ctx.p1().rev()
1373 p1 = ctx.p1().rev()
1356 p2 = ctx.p2().rev()
1374 p2 = ctx.p2().rev()
1357 bases = repo.changelog._commonancestorsheads(p1, p2)
1375 bases = repo.changelog._commonancestorsheads(p1, p2)
1358 for p in (p1, p2):
1376 for p in (p1, p2):
1359 for b in bases:
1377 for b in bases:
1360 base = repo[b]
1378 base = repo[b]
1361 parent = repo[p]
1379 parent = repo[p]
1362 missing = copies._computeforwardmissing(base, parent)
1380 missing = copies._computeforwardmissing(base, parent)
1363 if not missing:
1381 if not missing:
1364 continue
1382 continue
1365 data = {
1383 data = {
1366 b'source': base.hex(),
1384 b'source': base.hex(),
1367 b'destination': parent.hex(),
1385 b'destination': parent.hex(),
1368 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1386 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1369 b'nbmissingfiles': len(missing),
1387 b'nbmissingfiles': len(missing),
1370 }
1388 }
1371 if dotiming:
1389 if dotiming:
1372 begin = util.timer()
1390 begin = util.timer()
1373 renames = copies.pathcopies(base, parent)
1391 renames = copies.pathcopies(base, parent)
1374 end = util.timer()
1392 end = util.timer()
1375 # not very stable timing since we did only one run
1393 # not very stable timing since we did only one run
1376 data['time'] = end - begin
1394 data['time'] = end - begin
1377 data['nbrenamedfiles'] = len(renames)
1395 data['nbrenamedfiles'] = len(renames)
1378 fm.startitem()
1396 fm.startitem()
1379 fm.data(**data)
1397 fm.data(**data)
1380 out = data.copy()
1398 out = data.copy()
1381 out['source'] = fm.hexfunc(base.node())
1399 out['source'] = fm.hexfunc(base.node())
1382 out['destination'] = fm.hexfunc(parent.node())
1400 out['destination'] = fm.hexfunc(parent.node())
1383 fm.plain(output % out)
1401 fm.plain(output % out)
1384
1402
1385 fm.end()
1403 fm.end()
1386
1404
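# Illustrative invocations of perfhelper-pathcopies (a sketch; `--timing` adds
# the rename-detection run described in the docstring and is noticeably slower):
#
#   $ hg perfhelper-pathcopies
#   $ hg perfhelper-pathcopies --revs 'last(merge(), 20)' --timing
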
1387 @command(b'perfcca', formatteropts)
1405 @command(b'perfcca', formatteropts)
1388 def perfcca(ui, repo, **opts):
1406 def perfcca(ui, repo, **opts):
1389 opts = _byteskwargs(opts)
1407 opts = _byteskwargs(opts)
1390 timer, fm = gettimer(ui, opts)
1408 timer, fm = gettimer(ui, opts)
1391 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1409 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1392 fm.end()
1410 fm.end()
1393
1411
1394 @command(b'perffncacheload', formatteropts)
1412 @command(b'perffncacheload', formatteropts)
1395 def perffncacheload(ui, repo, **opts):
1413 def perffncacheload(ui, repo, **opts):
1396 opts = _byteskwargs(opts)
1414 opts = _byteskwargs(opts)
1397 timer, fm = gettimer(ui, opts)
1415 timer, fm = gettimer(ui, opts)
1398 s = repo.store
1416 s = repo.store
1399 def d():
1417 def d():
1400 s.fncache._load()
1418 s.fncache._load()
1401 timer(d)
1419 timer(d)
1402 fm.end()
1420 fm.end()
1403
1421
1404 @command(b'perffncachewrite', formatteropts)
1422 @command(b'perffncachewrite', formatteropts)
1405 def perffncachewrite(ui, repo, **opts):
1423 def perffncachewrite(ui, repo, **opts):
1406 opts = _byteskwargs(opts)
1424 opts = _byteskwargs(opts)
1407 timer, fm = gettimer(ui, opts)
1425 timer, fm = gettimer(ui, opts)
1408 s = repo.store
1426 s = repo.store
1409 lock = repo.lock()
1427 lock = repo.lock()
1410 s.fncache._load()
1428 s.fncache._load()
1411 tr = repo.transaction(b'perffncachewrite')
1429 tr = repo.transaction(b'perffncachewrite')
1412 tr.addbackup(b'fncache')
1430 tr.addbackup(b'fncache')
1413 def d():
1431 def d():
1414 s.fncache._dirty = True
1432 s.fncache._dirty = True
1415 s.fncache.write(tr)
1433 s.fncache.write(tr)
1416 timer(d)
1434 timer(d)
1417 tr.close()
1435 tr.close()
1418 lock.release()
1436 lock.release()
1419 fm.end()
1437 fm.end()
1420
1438
1421 @command(b'perffncacheencode', formatteropts)
1439 @command(b'perffncacheencode', formatteropts)
1422 def perffncacheencode(ui, repo, **opts):
1440 def perffncacheencode(ui, repo, **opts):
1423 opts = _byteskwargs(opts)
1441 opts = _byteskwargs(opts)
1424 timer, fm = gettimer(ui, opts)
1442 timer, fm = gettimer(ui, opts)
1425 s = repo.store
1443 s = repo.store
1426 s.fncache._load()
1444 s.fncache._load()
1427 def d():
1445 def d():
1428 for p in s.fncache.entries:
1446 for p in s.fncache.entries:
1429 s.encode(p)
1447 s.encode(p)
1430 timer(d)
1448 timer(d)
1431 fm.end()
1449 fm.end()
1432
1450
1433 def _bdiffworker(q, blocks, xdiff, ready, done):
1451 def _bdiffworker(q, blocks, xdiff, ready, done):
1434 while not done.is_set():
1452 while not done.is_set():
1435 pair = q.get()
1453 pair = q.get()
1436 while pair is not None:
1454 while pair is not None:
1437 if xdiff:
1455 if xdiff:
1438 mdiff.bdiff.xdiffblocks(*pair)
1456 mdiff.bdiff.xdiffblocks(*pair)
1439 elif blocks:
1457 elif blocks:
1440 mdiff.bdiff.blocks(*pair)
1458 mdiff.bdiff.blocks(*pair)
1441 else:
1459 else:
1442 mdiff.textdiff(*pair)
1460 mdiff.textdiff(*pair)
1443 q.task_done()
1461 q.task_done()
1444 pair = q.get()
1462 pair = q.get()
1445 q.task_done() # for the None one
1463 q.task_done() # for the None one
1446 with ready:
1464 with ready:
1447 ready.wait()
1465 ready.wait()
1448
1466
1449 def _manifestrevision(repo, mnode):
1467 def _manifestrevision(repo, mnode):
1450 ml = repo.manifestlog
1468 ml = repo.manifestlog
1451
1469
1452 if util.safehasattr(ml, b'getstorage'):
1470 if util.safehasattr(ml, b'getstorage'):
1453 store = ml.getstorage(b'')
1471 store = ml.getstorage(b'')
1454 else:
1472 else:
1455 store = ml._revlog
1473 store = ml._revlog
1456
1474
1457 return store.revision(mnode)
1475 return store.revision(mnode)
1458
1476
1459 @command(b'perfbdiff', revlogopts + formatteropts + [
1477 @command(b'perfbdiff', revlogopts + formatteropts + [
1460 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1478 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1461 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1479 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1462 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1480 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1463 (b'', b'blocks', False, b'test computing diffs into blocks'),
1481 (b'', b'blocks', False, b'test computing diffs into blocks'),
1464 (b'', b'xdiff', False, b'use xdiff algorithm'),
1482 (b'', b'xdiff', False, b'use xdiff algorithm'),
1465 ],
1483 ],
1466
1484
1467 b'-c|-m|FILE REV')
1485 b'-c|-m|FILE REV')
1468 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1486 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1469 """benchmark a bdiff between revisions
1487 """benchmark a bdiff between revisions
1470
1488
1471 By default, benchmark a bdiff between the given revision and its delta parent.
1489 By default, benchmark a bdiff between the given revision and its delta parent.
1472
1490
1473 With ``--count``, benchmark bdiffs between delta parents and self for N
1491 With ``--count``, benchmark bdiffs between delta parents and self for N
1474 revisions starting at the specified revision.
1492 revisions starting at the specified revision.
1475
1493
1476 With ``--alldata``, assume the requested revision is a changeset and
1494 With ``--alldata``, assume the requested revision is a changeset and
1477 measure bdiffs for all changes related to that changeset (manifest
1495 measure bdiffs for all changes related to that changeset (manifest
1478 and filelogs).
1496 and filelogs).
1479 """
1497 """
1480 opts = _byteskwargs(opts)
1498 opts = _byteskwargs(opts)
1481
1499
1482 if opts[b'xdiff'] and not opts[b'blocks']:
1500 if opts[b'xdiff'] and not opts[b'blocks']:
1483 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1501 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1484
1502
1485 if opts[b'alldata']:
1503 if opts[b'alldata']:
1486 opts[b'changelog'] = True
1504 opts[b'changelog'] = True
1487
1505
1488 if opts.get(b'changelog') or opts.get(b'manifest'):
1506 if opts.get(b'changelog') or opts.get(b'manifest'):
1489 file_, rev = None, file_
1507 file_, rev = None, file_
1490 elif rev is None:
1508 elif rev is None:
1491 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1509 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1492
1510
1493 blocks = opts[b'blocks']
1511 blocks = opts[b'blocks']
1494 xdiff = opts[b'xdiff']
1512 xdiff = opts[b'xdiff']
1495 textpairs = []
1513 textpairs = []
1496
1514
1497 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1515 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1498
1516
1499 startrev = r.rev(r.lookup(rev))
1517 startrev = r.rev(r.lookup(rev))
1500 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1518 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1501 if opts[b'alldata']:
1519 if opts[b'alldata']:
1502 # Load revisions associated with changeset.
1520 # Load revisions associated with changeset.
1503 ctx = repo[rev]
1521 ctx = repo[rev]
1504 mtext = _manifestrevision(repo, ctx.manifestnode())
1522 mtext = _manifestrevision(repo, ctx.manifestnode())
1505 for pctx in ctx.parents():
1523 for pctx in ctx.parents():
1506 pman = _manifestrevision(repo, pctx.manifestnode())
1524 pman = _manifestrevision(repo, pctx.manifestnode())
1507 textpairs.append((pman, mtext))
1525 textpairs.append((pman, mtext))
1508
1526
1509 # Load filelog revisions by iterating manifest delta.
1527 # Load filelog revisions by iterating manifest delta.
1510 man = ctx.manifest()
1528 man = ctx.manifest()
1511 pman = ctx.p1().manifest()
1529 pman = ctx.p1().manifest()
1512 for filename, change in pman.diff(man).items():
1530 for filename, change in pman.diff(man).items():
1513 fctx = repo.file(filename)
1531 fctx = repo.file(filename)
1514 f1 = fctx.revision(change[0][0] or -1)
1532 f1 = fctx.revision(change[0][0] or -1)
1515 f2 = fctx.revision(change[1][0] or -1)
1533 f2 = fctx.revision(change[1][0] or -1)
1516 textpairs.append((f1, f2))
1534 textpairs.append((f1, f2))
1517 else:
1535 else:
1518 dp = r.deltaparent(rev)
1536 dp = r.deltaparent(rev)
1519 textpairs.append((r.revision(dp), r.revision(rev)))
1537 textpairs.append((r.revision(dp), r.revision(rev)))
1520
1538
1521 withthreads = threads > 0
1539 withthreads = threads > 0
1522 if not withthreads:
1540 if not withthreads:
1523 def d():
1541 def d():
1524 for pair in textpairs:
1542 for pair in textpairs:
1525 if xdiff:
1543 if xdiff:
1526 mdiff.bdiff.xdiffblocks(*pair)
1544 mdiff.bdiff.xdiffblocks(*pair)
1527 elif blocks:
1545 elif blocks:
1528 mdiff.bdiff.blocks(*pair)
1546 mdiff.bdiff.blocks(*pair)
1529 else:
1547 else:
1530 mdiff.textdiff(*pair)
1548 mdiff.textdiff(*pair)
1531 else:
1549 else:
1532 q = queue()
1550 q = queue()
1533 for i in _xrange(threads):
1551 for i in _xrange(threads):
1534 q.put(None)
1552 q.put(None)
1535 ready = threading.Condition()
1553 ready = threading.Condition()
1536 done = threading.Event()
1554 done = threading.Event()
1537 for i in _xrange(threads):
1555 for i in _xrange(threads):
1538 threading.Thread(target=_bdiffworker,
1556 threading.Thread(target=_bdiffworker,
1539 args=(q, blocks, xdiff, ready, done)).start()
1557 args=(q, blocks, xdiff, ready, done)).start()
1540 q.join()
1558 q.join()
1541 def d():
1559 def d():
1542 for pair in textpairs:
1560 for pair in textpairs:
1543 q.put(pair)
1561 q.put(pair)
1544 for i in _xrange(threads):
1562 for i in _xrange(threads):
1545 q.put(None)
1563 q.put(None)
1546 with ready:
1564 with ready:
1547 ready.notify_all()
1565 ready.notify_all()
1548 q.join()
1566 q.join()
1549 timer, fm = gettimer(ui, opts)
1567 timer, fm = gettimer(ui, opts)
1550 timer(d)
1568 timer(d)
1551 fm.end()
1569 fm.end()
1552
1570
1553 if withthreads:
1571 if withthreads:
1554 done.set()
1572 done.set()
1555 for i in _xrange(threads):
1573 for i in _xrange(threads):
1556 q.put(None)
1574 q.put(None)
1557 with ready:
1575 with ready:
1558 ready.notify_all()
1576 ready.notify_all()
1559
1577
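# Illustrative invocations of perfbdiff (a sketch; flags as defined above):
#
#   $ hg perfbdiff -c 100                       # changelog rev 100 vs its delta parent
#   $ hg perfbdiff --alldata --count 10 100     # manifest+filelog pairs for 10 csets
#   $ hg perfbdiff -m --blocks --xdiff 500      # block diffs via the xdiff algorithm
#   $ hg perfbdiff -c --threads 4 100           # feed the text pairs to 4 workers
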
1560 @command(b'perfunidiff', revlogopts + formatteropts + [
1578 @command(b'perfunidiff', revlogopts + formatteropts + [
1561 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1579 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1562 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1580 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1563 ], b'-c|-m|FILE REV')
1581 ], b'-c|-m|FILE REV')
1564 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1582 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1565 """benchmark a unified diff between revisions
1583 """benchmark a unified diff between revisions
1566
1584
1567 This doesn't include any copy tracing - it's just a unified diff
1585 This doesn't include any copy tracing - it's just a unified diff
1568 of the texts.
1586 of the texts.
1569
1587
1570 By default, benchmark a diff between the given revision and its delta parent.
1588 By default, benchmark a diff between the given revision and its delta parent.
1571
1589
1572 With ``--count``, benchmark diffs between delta parents and self for N
1590 With ``--count``, benchmark diffs between delta parents and self for N
1573 revisions starting at the specified revision.
1591 revisions starting at the specified revision.
1574
1592
1575 With ``--alldata``, assume the requested revision is a changeset and
1593 With ``--alldata``, assume the requested revision is a changeset and
1576 measure diffs for all changes related to that changeset (manifest
1594 measure diffs for all changes related to that changeset (manifest
1577 and filelogs).
1595 and filelogs).
1578 """
1596 """
1579 opts = _byteskwargs(opts)
1597 opts = _byteskwargs(opts)
1580 if opts[b'alldata']:
1598 if opts[b'alldata']:
1581 opts[b'changelog'] = True
1599 opts[b'changelog'] = True
1582
1600
1583 if opts.get(b'changelog') or opts.get(b'manifest'):
1601 if opts.get(b'changelog') or opts.get(b'manifest'):
1584 file_, rev = None, file_
1602 file_, rev = None, file_
1585 elif rev is None:
1603 elif rev is None:
1586 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1604 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1587
1605
1588 textpairs = []
1606 textpairs = []
1589
1607
1590 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1608 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1591
1609
1592 startrev = r.rev(r.lookup(rev))
1610 startrev = r.rev(r.lookup(rev))
1593 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1611 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1594 if opts[b'alldata']:
1612 if opts[b'alldata']:
1595 # Load revisions associated with changeset.
1613 # Load revisions associated with changeset.
1596 ctx = repo[rev]
1614 ctx = repo[rev]
1597 mtext = _manifestrevision(repo, ctx.manifestnode())
1615 mtext = _manifestrevision(repo, ctx.manifestnode())
1598 for pctx in ctx.parents():
1616 for pctx in ctx.parents():
1599 pman = _manifestrevision(repo, pctx.manifestnode())
1617 pman = _manifestrevision(repo, pctx.manifestnode())
1600 textpairs.append((pman, mtext))
1618 textpairs.append((pman, mtext))
1601
1619
1602 # Load filelog revisions by iterating manifest delta.
1620 # Load filelog revisions by iterating manifest delta.
1603 man = ctx.manifest()
1621 man = ctx.manifest()
1604 pman = ctx.p1().manifest()
1622 pman = ctx.p1().manifest()
1605 for filename, change in pman.diff(man).items():
1623 for filename, change in pman.diff(man).items():
1606 fctx = repo.file(filename)
1624 fctx = repo.file(filename)
1607 f1 = fctx.revision(change[0][0] or -1)
1625 f1 = fctx.revision(change[0][0] or -1)
1608 f2 = fctx.revision(change[1][0] or -1)
1626 f2 = fctx.revision(change[1][0] or -1)
1609 textpairs.append((f1, f2))
1627 textpairs.append((f1, f2))
1610 else:
1628 else:
1611 dp = r.deltaparent(rev)
1629 dp = r.deltaparent(rev)
1612 textpairs.append((r.revision(dp), r.revision(rev)))
1630 textpairs.append((r.revision(dp), r.revision(rev)))
1613
1631
1614 def d():
1632 def d():
1615 for left, right in textpairs:
1633 for left, right in textpairs:
1616 # The date strings don't matter, so we pass empty strings.
1634 # The date strings don't matter, so we pass empty strings.
1617 headerlines, hunks = mdiff.unidiff(
1635 headerlines, hunks = mdiff.unidiff(
1618 left, b'', right, b'', b'left', b'right', binary=False)
1636 left, b'', right, b'', b'left', b'right', binary=False)
1619 # consume iterators in roughly the way patch.py does
1637 # consume iterators in roughly the way patch.py does
1620 b'\n'.join(headerlines)
1638 b'\n'.join(headerlines)
1621 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1639 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1622 timer, fm = gettimer(ui, opts)
1640 timer, fm = gettimer(ui, opts)
1623 timer(d)
1641 timer(d)
1624 fm.end()
1642 fm.end()
1625
1643
1626 @command(b'perfdiffwd', formatteropts)
1644 @command(b'perfdiffwd', formatteropts)
1627 def perfdiffwd(ui, repo, **opts):
1645 def perfdiffwd(ui, repo, **opts):
1628 """Profile diff of working directory changes"""
1646 """Profile diff of working directory changes"""
1629 opts = _byteskwargs(opts)
1647 opts = _byteskwargs(opts)
1630 timer, fm = gettimer(ui, opts)
1648 timer, fm = gettimer(ui, opts)
1631 options = {
1649 options = {
1632 'w': 'ignore_all_space',
1650 'w': 'ignore_all_space',
1633 'b': 'ignore_space_change',
1651 'b': 'ignore_space_change',
1634 'B': 'ignore_blank_lines',
1652 'B': 'ignore_blank_lines',
1635 }
1653 }
1636
1654
1637 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1655 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1638 opts = dict((options[c], b'1') for c in diffopt)
1656 opts = dict((options[c], b'1') for c in diffopt)
1639 def d():
1657 def d():
1640 ui.pushbuffer()
1658 ui.pushbuffer()
1641 commands.diff(ui, repo, **opts)
1659 commands.diff(ui, repo, **opts)
1642 ui.popbuffer()
1660 ui.popbuffer()
1643 diffopt = diffopt.encode('ascii')
1661 diffopt = diffopt.encode('ascii')
1644 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1662 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1645 timer(d, title=title)
1663 timer(d, title=title)
1646 fm.end()
1664 fm.end()
1647
1665
1648 @command(b'perfrevlogindex', revlogopts + formatteropts,
1666 @command(b'perfrevlogindex', revlogopts + formatteropts,
1649 b'-c|-m|FILE')
1667 b'-c|-m|FILE')
1650 def perfrevlogindex(ui, repo, file_=None, **opts):
1668 def perfrevlogindex(ui, repo, file_=None, **opts):
1651 """Benchmark operations against a revlog index.
1669 """Benchmark operations against a revlog index.
1652
1670
1653 This tests constructing a revlog instance, reading index data,
1671 This tests constructing a revlog instance, reading index data,
1654 parsing index data, and performing various operations related to
1672 parsing index data, and performing various operations related to
1655 index data.
1673 index data.
1656 """
1674 """
1657
1675
1658 opts = _byteskwargs(opts)
1676 opts = _byteskwargs(opts)
1659
1677
1660 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1678 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1661
1679
1662 opener = getattr(rl, 'opener') # trick linter
1680 opener = getattr(rl, 'opener') # trick linter
1663 indexfile = rl.indexfile
1681 indexfile = rl.indexfile
1664 data = opener.read(indexfile)
1682 data = opener.read(indexfile)
1665
1683
1666 header = struct.unpack(b'>I', data[0:4])[0]
1684 header = struct.unpack(b'>I', data[0:4])[0]
1667 version = header & 0xFFFF
1685 version = header & 0xFFFF
1668 if version == 1:
1686 if version == 1:
1669 revlogio = revlog.revlogio()
1687 revlogio = revlog.revlogio()
1670 inline = header & (1 << 16)
1688 inline = header & (1 << 16)
1671 else:
1689 else:
1672 raise error.Abort((b'unsupported revlog version: %d') % version)
1690 raise error.Abort((b'unsupported revlog version: %d') % version)
1673
1691
1674 rllen = len(rl)
1692 rllen = len(rl)
1675
1693
1676 node0 = rl.node(0)
1694 node0 = rl.node(0)
1677 node25 = rl.node(rllen // 4)
1695 node25 = rl.node(rllen // 4)
1678 node50 = rl.node(rllen // 2)
1696 node50 = rl.node(rllen // 2)
1679 node75 = rl.node(rllen // 4 * 3)
1697 node75 = rl.node(rllen // 4 * 3)
1680 node100 = rl.node(rllen - 1)
1698 node100 = rl.node(rllen - 1)
1681
1699
1682 allrevs = range(rllen)
1700 allrevs = range(rllen)
1683 allrevsrev = list(reversed(allrevs))
1701 allrevsrev = list(reversed(allrevs))
1684 allnodes = [rl.node(rev) for rev in range(rllen)]
1702 allnodes = [rl.node(rev) for rev in range(rllen)]
1685 allnodesrev = list(reversed(allnodes))
1703 allnodesrev = list(reversed(allnodes))
1686
1704
1687 def constructor():
1705 def constructor():
1688 revlog.revlog(opener, indexfile)
1706 revlog.revlog(opener, indexfile)
1689
1707
1690 def read():
1708 def read():
1691 with opener(indexfile) as fh:
1709 with opener(indexfile) as fh:
1692 fh.read()
1710 fh.read()
1693
1711
1694 def parseindex():
1712 def parseindex():
1695 revlogio.parseindex(data, inline)
1713 revlogio.parseindex(data, inline)
1696
1714
1697 def getentry(revornode):
1715 def getentry(revornode):
1698 index = revlogio.parseindex(data, inline)[0]
1716 index = revlogio.parseindex(data, inline)[0]
1699 index[revornode]
1717 index[revornode]
1700
1718
1701 def getentries(revs, count=1):
1719 def getentries(revs, count=1):
1702 index = revlogio.parseindex(data, inline)[0]
1720 index = revlogio.parseindex(data, inline)[0]
1703
1721
1704 for i in range(count):
1722 for i in range(count):
1705 for rev in revs:
1723 for rev in revs:
1706 index[rev]
1724 index[rev]
1707
1725
1708 def resolvenode(node):
1726 def resolvenode(node):
1709 nodemap = revlogio.parseindex(data, inline)[1]
1727 nodemap = revlogio.parseindex(data, inline)[1]
1710 # This only works for the C code.
1728 # This only works for the C code.
1711 if nodemap is None:
1729 if nodemap is None:
1712 return
1730 return
1713
1731
1714 try:
1732 try:
1715 nodemap[node]
1733 nodemap[node]
1716 except error.RevlogError:
1734 except error.RevlogError:
1717 pass
1735 pass
1718
1736
1719 def resolvenodes(nodes, count=1):
1737 def resolvenodes(nodes, count=1):
1720 nodemap = revlogio.parseindex(data, inline)[1]
1738 nodemap = revlogio.parseindex(data, inline)[1]
1721 if nodemap is None:
1739 if nodemap is None:
1722 return
1740 return
1723
1741
1724 for i in range(count):
1742 for i in range(count):
1725 for node in nodes:
1743 for node in nodes:
1726 try:
1744 try:
1727 nodemap[node]
1745 nodemap[node]
1728 except error.RevlogError:
1746 except error.RevlogError:
1729 pass
1747 pass
1730
1748
1731 benches = [
1749 benches = [
1732 (constructor, b'revlog constructor'),
1750 (constructor, b'revlog constructor'),
1733 (read, b'read'),
1751 (read, b'read'),
1734 (parseindex, b'create index object'),
1752 (parseindex, b'create index object'),
1735 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1753 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1736 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1754 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1737 (lambda: resolvenode(node0), b'look up node at rev 0'),
1755 (lambda: resolvenode(node0), b'look up node at rev 0'),
1738 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1756 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1739 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1757 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1740 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1758 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1741 (lambda: resolvenode(node100), b'look up node at tip'),
1759 (lambda: resolvenode(node100), b'look up node at tip'),
1742 # 2x variation is to measure caching impact.
1760 # 2x variation is to measure caching impact.
1743 (lambda: resolvenodes(allnodes),
1761 (lambda: resolvenodes(allnodes),
1744 b'look up all nodes (forward)'),
1762 b'look up all nodes (forward)'),
1745 (lambda: resolvenodes(allnodes, 2),
1763 (lambda: resolvenodes(allnodes, 2),
1746 b'look up all nodes 2x (forward)'),
1764 b'look up all nodes 2x (forward)'),
1747 (lambda: resolvenodes(allnodesrev),
1765 (lambda: resolvenodes(allnodesrev),
1748 b'look up all nodes (reverse)'),
1766 b'look up all nodes (reverse)'),
1749 (lambda: resolvenodes(allnodesrev, 2),
1767 (lambda: resolvenodes(allnodesrev, 2),
1750 b'look up all nodes 2x (reverse)'),
1768 b'look up all nodes 2x (reverse)'),
1751 (lambda: getentries(allrevs),
1769 (lambda: getentries(allrevs),
1752 b'retrieve all index entries (forward)'),
1770 b'retrieve all index entries (forward)'),
1753 (lambda: getentries(allrevs, 2),
1771 (lambda: getentries(allrevs, 2),
1754 b'retrieve all index entries 2x (forward)'),
1772 b'retrieve all index entries 2x (forward)'),
1755 (lambda: getentries(allrevsrev),
1773 (lambda: getentries(allrevsrev),
1756 b'retrieve all index entries (reverse)'),
1774 b'retrieve all index entries (reverse)'),
1757 (lambda: getentries(allrevsrev, 2),
1775 (lambda: getentries(allrevsrev, 2),
1758 b'retrieve all index entries 2x (reverse)'),
1776 b'retrieve all index entries 2x (reverse)'),
1759 ]
1777 ]
1760
1778
1761 for fn, title in benches:
1779 for fn, title in benches:
1762 timer, fm = gettimer(ui, opts)
1780 timer, fm = gettimer(ui, opts)
1763 timer(fn, title=title)
1781 timer(fn, title=title)
1764 fm.end()
1782 fm.end()
1765
1783
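# The version/inline detection above unpacks the first four bytes of the index
# as a big-endian uint32: the low 16 bits hold the revlog version and bit 16
# the "inline data" flag. A standalone sketch of that decoding (illustrative;
# `data` stands for the raw index bytes read above):
#
#     header = struct.unpack(b'>I', data[0:4])[0]
#     version = header & 0xFFFF          # 1 means revlogv1
#     inline = bool(header & (1 << 16))  # data interleaved with the index
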
1766 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1784 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1767 [(b'd', b'dist', 100, b'distance between the revisions'),
1785 [(b'd', b'dist', 100, b'distance between the revisions'),
1768 (b's', b'startrev', 0, b'revision to start reading at'),
1786 (b's', b'startrev', 0, b'revision to start reading at'),
1769 (b'', b'reverse', False, b'read in reverse')],
1787 (b'', b'reverse', False, b'read in reverse')],
1770 b'-c|-m|FILE')
1788 b'-c|-m|FILE')
1771 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1789 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1772 **opts):
1790 **opts):
1773 """Benchmark reading a series of revisions from a revlog.
1791 """Benchmark reading a series of revisions from a revlog.
1774
1792
1775 By default, we read every ``-d/--dist`` revision from 0 to tip of
1793 By default, we read every ``-d/--dist`` revision from 0 to tip of
1776 the specified revlog.
1794 the specified revlog.
1777
1795
1778 The start revision can be defined via ``-s/--startrev``.
1796 The start revision can be defined via ``-s/--startrev``.
1779 """
1797 """
1780 opts = _byteskwargs(opts)
1798 opts = _byteskwargs(opts)
1781
1799
1782 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1800 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1783 rllen = getlen(ui)(rl)
1801 rllen = getlen(ui)(rl)
1784
1802
1785 if startrev < 0:
1803 if startrev < 0:
1786 startrev = rllen + startrev
1804 startrev = rllen + startrev
1787
1805
1788 def d():
1806 def d():
1789 rl.clearcaches()
1807 rl.clearcaches()
1790
1808
1791 beginrev = startrev
1809 beginrev = startrev
1792 endrev = rllen
1810 endrev = rllen
1793 dist = opts[b'dist']
1811 dist = opts[b'dist']
1794
1812
1795 if reverse:
1813 if reverse:
1796 beginrev, endrev = endrev - 1, beginrev - 1
1814 beginrev, endrev = endrev - 1, beginrev - 1
1797 dist = -1 * dist
1815 dist = -1 * dist
1798
1816
1799 for x in _xrange(beginrev, endrev, dist):
1817 for x in _xrange(beginrev, endrev, dist):
1800 # Old revisions don't support passing int.
1818 # Old revisions don't support passing int.
1801 n = rl.node(x)
1819 n = rl.node(x)
1802 rl.revision(n)
1820 rl.revision(n)
1803
1821
1804 timer, fm = gettimer(ui, opts)
1822 timer, fm = gettimer(ui, opts)
1805 timer(d)
1823 timer(d)
1806 fm.end()
1824 fm.end()
1807
1825
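# Illustrative invocations of perfrevlogrevisions (a sketch; flags as defined
# above):
#
#   $ hg perfrevlogrevisions -c                 # every 100th changelog revision
#   $ hg perfrevlogrevisions -m -d 1 -s -1000   # the last 1000 manifest revisions
#   $ hg perfrevlogrevisions -c --reverse       # walk from tip towards rev 0
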
1808 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1826 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1809 [(b's', b'startrev', 1000, b'revision to start writing at'),
1827 [(b's', b'startrev', 1000, b'revision to start writing at'),
1810 (b'', b'stoprev', -1, b'last revision to write'),
1828 (b'', b'stoprev', -1, b'last revision to write'),
1811 (b'', b'count', 3, b'number of timing runs to perform'),
1829 (b'', b'count', 3, b'number of timing runs to perform'),
1812 (b'', b'details', False, b'print timing for every revision tested'),
1830 (b'', b'details', False, b'print timing for every revision tested'),
1813 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1831 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1814 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1832 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1815 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1833 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1816 ],
1834 ],
1817 b'-c|-m|FILE')
1835 b'-c|-m|FILE')
1818 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1836 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1819 """Benchmark writing a series of revisions to a revlog.
1837 """Benchmark writing a series of revisions to a revlog.
1820
1838
1821 Possible source values are:
1839 Possible source values are:
1822 * `full`: add from a full text (default).
1840 * `full`: add from a full text (default).
1823 * `parent-1`: add from a delta to the first parent
1841 * `parent-1`: add from a delta to the first parent
1824 * `parent-2`: add from a delta to the second parent if it exists
1842 * `parent-2`: add from a delta to the second parent if it exists
1825 (use a delta from the first parent otherwise)
1843 (use a delta from the first parent otherwise)
1826 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1844 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1827 * `storage`: add from the existing precomputed deltas
1845 * `storage`: add from the existing precomputed deltas
1828 """
1846 """
1829 opts = _byteskwargs(opts)
1847 opts = _byteskwargs(opts)
1830
1848
1831 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1849 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1832 rllen = getlen(ui)(rl)
1850 rllen = getlen(ui)(rl)
1833 if startrev < 0:
1851 if startrev < 0:
1834 startrev = rllen + startrev
1852 startrev = rllen + startrev
1835 if stoprev < 0:
1853 if stoprev < 0:
1836 stoprev = rllen + stoprev
1854 stoprev = rllen + stoprev
1837
1855
1838 lazydeltabase = opts['lazydeltabase']
1856 lazydeltabase = opts['lazydeltabase']
1839 source = opts['source']
1857 source = opts['source']
1840 clearcaches = opts['clear_caches']
1858 clearcaches = opts['clear_caches']
1841 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1859 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1842 b'storage')
1860 b'storage')
1843 if source not in validsource:
1861 if source not in validsource:
1844 raise error.Abort('invalid source type: %s' % source)
1862 raise error.Abort('invalid source type: %s' % source)
1845
1863
1846 ### actually gather results
1864 ### actually gather results
1847 count = opts['count']
1865 count = opts['count']
1848 if count <= 0:
1866 if count <= 0:
1849 raise error.Abort('invalid run count: %d' % count)
1867 raise error.Abort('invalid run count: %d' % count)
1850 allresults = []
1868 allresults = []
1851 for c in range(count):
1869 for c in range(count):
1852 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1870 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1853 lazydeltabase=lazydeltabase,
1871 lazydeltabase=lazydeltabase,
1854 clearcaches=clearcaches)
1872 clearcaches=clearcaches)
1855 allresults.append(timing)
1873 allresults.append(timing)
1856
1874
1857 ### consolidate the results in a single list
1875 ### consolidate the results in a single list
1858 results = []
1876 results = []
1859 for idx, (rev, t) in enumerate(allresults[0]):
1877 for idx, (rev, t) in enumerate(allresults[0]):
1860 ts = [t]
1878 ts = [t]
1861 for other in allresults[1:]:
1879 for other in allresults[1:]:
1862 orev, ot = other[idx]
1880 orev, ot = other[idx]
1863 assert orev == rev
1881 assert orev == rev
1864 ts.append(ot)
1882 ts.append(ot)
1865 results.append((rev, ts))
1883 results.append((rev, ts))
1866 resultcount = len(results)
1884 resultcount = len(results)
1867
1885
1868 ### Compute and display relevant statistics
1886 ### Compute and display relevant statistics
1869
1887
1870 # get a formatter
1888 # get a formatter
1871 fm = ui.formatter(b'perf', opts)
1889 fm = ui.formatter(b'perf', opts)
1872 displayall = ui.configbool(b"perf", b"all-timing", False)
1890 displayall = ui.configbool(b"perf", b"all-timing", False)
1873
1891
1874 # print individual details if requested
1892 # print individual details if requested
1875 if opts['details']:
1893 if opts['details']:
1876 for idx, item in enumerate(results, 1):
1894 for idx, item in enumerate(results, 1):
1877 rev, data = item
1895 rev, data = item
1878 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1896 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1879 formatone(fm, data, title=title, displayall=displayall)
1897 formatone(fm, data, title=title, displayall=displayall)
1880
1898
1881 # sorts results by median time
1899 # sorts results by median time
1882 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1900 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1883 # list of (name, index) to display
1901 # list of (name, index) to display
1884 relevants = [
1902 relevants = [
1885 ("min", 0),
1903 ("min", 0),
1886 ("10%", resultcount * 10 // 100),
1904 ("10%", resultcount * 10 // 100),
1887 ("25%", resultcount * 25 // 100),
1905 ("25%", resultcount * 25 // 100),
1888 ("50%", resultcount * 70 // 100),
1906 ("50%", resultcount * 70 // 100),
1889 ("75%", resultcount * 75 // 100),
1907 ("75%", resultcount * 75 // 100),
1890 ("90%", resultcount * 90 // 100),
1908 ("90%", resultcount * 90 // 100),
1891 ("95%", resultcount * 95 // 100),
1909 ("95%", resultcount * 95 // 100),
1892 ("99%", resultcount * 99 // 100),
1910 ("99%", resultcount * 99 // 100),
1893 ("99.9%", resultcount * 999 // 1000),
1911 ("99.9%", resultcount * 999 // 1000),
1894 ("99.99%", resultcount * 9999 // 10000),
1912 ("99.99%", resultcount * 9999 // 10000),
1895 ("99.999%", resultcount * 99999 // 100000),
1913 ("99.999%", resultcount * 99999 // 100000),
1896 ("max", -1),
1914 ("max", -1),
1897 ]
1915 ]
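# For example, with resultcount == 200 (a hypothetical run size, used here
# only to illustrate the integer arithmetic above) the indices resolve to
# 0, 20, 50, 100, 150, 180, 190, 198, 199, 199, 199 and -1.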
1898 if not ui.quiet:
1916 if not ui.quiet:
1899 for name, idx in relevants:
1917 for name, idx in relevants:
1900 data = results[idx]
1918 data = results[idx]
1901 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1919 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1902 formatone(fm, data[1], title=title, displayall=displayall)
1920 formatone(fm, data[1], title=title, displayall=displayall)
1903
1921
1904 # XXX summing that many floats will not be very precise; we ignore this fact
1922 # XXX summing that many floats will not be very precise; we ignore this fact
1905 # for now
1923 # for now
1906 totaltime = []
1924 totaltime = []
1907 for item in allresults:
1925 for item in allresults:
1908 totaltime.append((sum(x[1][0] for x in item),
1926 totaltime.append((sum(x[1][0] for x in item),
1909 sum(x[1][1] for x in item),
1927 sum(x[1][1] for x in item),
1910 sum(x[1][2] for x in item),)
1928 sum(x[1][2] for x in item),)
1911 )
1929 )
1912 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1930 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1913 displayall=displayall)
1931 displayall=displayall)
1914 fm.end()
1932 fm.end()
1915
1933
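# Illustrative invocation of perfrevlogwrite (not part of perf.py itself),
# assuming the extension is enabled; the revision range and run count are
# arbitrary example values:
#
#   $ hg perfrevlogwrite -m --source parent-smallest --startrev 5000 --count 3
#
# Each run re-adds revisions 5000 through tip to a temporary copy of the
# manifest revlog, feeding it deltas against the smaller of the two parents.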
1916 class _faketr(object):
1934 class _faketr(object):
1917 def add(s, x, y, z=None):
1935 def add(s, x, y, z=None):
1918 return None
1936 return None
1919
1937
1920 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1938 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1921 lazydeltabase=True, clearcaches=True):
1939 lazydeltabase=True, clearcaches=True):
1922 timings = []
1940 timings = []
1923 tr = _faketr()
1941 tr = _faketr()
1924 with _temprevlog(ui, orig, startrev) as dest:
1942 with _temprevlog(ui, orig, startrev) as dest:
1925 dest._lazydeltabase = lazydeltabase
1943 dest._lazydeltabase = lazydeltabase
1926 revs = list(orig.revs(startrev, stoprev))
1944 revs = list(orig.revs(startrev, stoprev))
1927 total = len(revs)
1945 total = len(revs)
1928 topic = 'adding'
1946 topic = 'adding'
1929 if runidx is not None:
1947 if runidx is not None:
1930 topic += ' (run #%d)' % runidx
1948 topic += ' (run #%d)' % runidx
1931 # Support both old and new progress API
1949 # Support both old and new progress API
1932 if util.safehasattr(ui, 'makeprogress'):
1950 if util.safehasattr(ui, 'makeprogress'):
1933 progress = ui.makeprogress(topic, unit='revs', total=total)
1951 progress = ui.makeprogress(topic, unit='revs', total=total)
1934 def updateprogress(pos):
1952 def updateprogress(pos):
1935 progress.update(pos)
1953 progress.update(pos)
1936 def completeprogress():
1954 def completeprogress():
1937 progress.complete()
1955 progress.complete()
1938 else:
1956 else:
1939 def updateprogress(pos):
1957 def updateprogress(pos):
1940 ui.progress(topic, pos, unit='revs', total=total)
1958 ui.progress(topic, pos, unit='revs', total=total)
1941 def completeprogress():
1959 def completeprogress():
1942 ui.progress(topic, None, unit='revs', total=total)
1960 ui.progress(topic, None, unit='revs', total=total)
1943
1961
1944 for idx, rev in enumerate(revs):
1962 for idx, rev in enumerate(revs):
1945 updateprogress(idx)
1963 updateprogress(idx)
1946 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1964 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1947 if clearcaches:
1965 if clearcaches:
1948 dest.index.clearcaches()
1966 dest.index.clearcaches()
1949 dest.clearcaches()
1967 dest.clearcaches()
1950 with timeone() as r:
1968 with timeone() as r:
1951 dest.addrawrevision(*addargs, **addkwargs)
1969 dest.addrawrevision(*addargs, **addkwargs)
1952 timings.append((rev, r[0]))
1970 timings.append((rev, r[0]))
1953 updateprogress(total)
1971 updateprogress(total)
1954 completeprogress()
1972 completeprogress()
1955 return timings
1973 return timings
1956
1974
1957 def _getrevisionseed(orig, rev, tr, source):
1975 def _getrevisionseed(orig, rev, tr, source):
1958 from mercurial.node import nullid
1976 from mercurial.node import nullid
1959
1977
1960 linkrev = orig.linkrev(rev)
1978 linkrev = orig.linkrev(rev)
1961 node = orig.node(rev)
1979 node = orig.node(rev)
1962 p1, p2 = orig.parents(node)
1980 p1, p2 = orig.parents(node)
1963 flags = orig.flags(rev)
1981 flags = orig.flags(rev)
1964 cachedelta = None
1982 cachedelta = None
1965 text = None
1983 text = None
1966
1984
1967 if source == b'full':
1985 if source == b'full':
1968 text = orig.revision(rev)
1986 text = orig.revision(rev)
1969 elif source == b'parent-1':
1987 elif source == b'parent-1':
1970 baserev = orig.rev(p1)
1988 baserev = orig.rev(p1)
1971 cachedelta = (baserev, orig.revdiff(p1, rev))
1989 cachedelta = (baserev, orig.revdiff(p1, rev))
1972 elif source == b'parent-2':
1990 elif source == b'parent-2':
1973 parent = p2
1991 parent = p2
1974 if p2 == nullid:
1992 if p2 == nullid:
1975 parent = p1
1993 parent = p1
1976 baserev = orig.rev(parent)
1994 baserev = orig.rev(parent)
1977 cachedelta = (baserev, orig.revdiff(parent, rev))
1995 cachedelta = (baserev, orig.revdiff(parent, rev))
1978 elif source == b'parent-smallest':
1996 elif source == b'parent-smallest':
1979 p1diff = orig.revdiff(p1, rev)
1997 p1diff = orig.revdiff(p1, rev)
1980 parent = p1
1998 parent = p1
1981 diff = p1diff
1999 diff = p1diff
1982 if p2 != nullid:
2000 if p2 != nullid:
1983 p2diff = orig.revdiff(p2, rev)
2001 p2diff = orig.revdiff(p2, rev)
1984 if len(p1diff) > len(p2diff):
2002 if len(p1diff) > len(p2diff):
1985 parent = p2
2003 parent = p2
1986 diff = p2diff
2004 diff = p2diff
1987 baserev = orig.rev(parent)
2005 baserev = orig.rev(parent)
1988 cachedelta = (baserev, diff)
2006 cachedelta = (baserev, diff)
1989 elif source == b'storage':
2007 elif source == b'storage':
1990 baserev = orig.deltaparent(rev)
2008 baserev = orig.deltaparent(rev)
1991 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2009 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1992
2010
1993 return ((text, tr, linkrev, p1, p2),
2011 return ((text, tr, linkrev, p1, p2),
1994 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2012 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1995
2013
1996 @contextlib.contextmanager
2014 @contextlib.contextmanager
1997 def _temprevlog(ui, orig, truncaterev):
2015 def _temprevlog(ui, orig, truncaterev):
1998 from mercurial import vfs as vfsmod
2016 from mercurial import vfs as vfsmod
1999
2017
2000 if orig._inline:
2018 if orig._inline:
2001 raise error.Abort('not supporting inline revlog (yet)')
2019 raise error.Abort('not supporting inline revlog (yet)')
2002
2020
2003 origindexpath = orig.opener.join(orig.indexfile)
2021 origindexpath = orig.opener.join(orig.indexfile)
2004 origdatapath = orig.opener.join(orig.datafile)
2022 origdatapath = orig.opener.join(orig.datafile)
2005 indexname = 'revlog.i'
2023 indexname = 'revlog.i'
2006 dataname = 'revlog.d'
2024 dataname = 'revlog.d'
2007
2025
2008 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2026 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2009 try:
2027 try:
2010 # copy the data file in a temporary directory
2028 # copy the data file in a temporary directory
2011 ui.debug('copying data in %s\n' % tmpdir)
2029 ui.debug('copying data in %s\n' % tmpdir)
2012 destindexpath = os.path.join(tmpdir, 'revlog.i')
2030 destindexpath = os.path.join(tmpdir, 'revlog.i')
2013 destdatapath = os.path.join(tmpdir, 'revlog.d')
2031 destdatapath = os.path.join(tmpdir, 'revlog.d')
2014 shutil.copyfile(origindexpath, destindexpath)
2032 shutil.copyfile(origindexpath, destindexpath)
2015 shutil.copyfile(origdatapath, destdatapath)
2033 shutil.copyfile(origdatapath, destdatapath)
2016
2034
2017 # remove the data we want to add again
2035 # remove the data we want to add again
2018 ui.debug('truncating data to be rewritten\n')
2036 ui.debug('truncating data to be rewritten\n')
2019 with open(destindexpath, 'ab') as index:
2037 with open(destindexpath, 'ab') as index:
2020 index.seek(0)
2038 index.seek(0)
2021 index.truncate(truncaterev * orig._io.size)
2039 index.truncate(truncaterev * orig._io.size)
2022 with open(destdatapath, 'ab') as data:
2040 with open(destdatapath, 'ab') as data:
2023 data.seek(0)
2041 data.seek(0)
2024 data.truncate(orig.start(truncaterev))
2042 data.truncate(orig.start(truncaterev))
2025
2043
2026 # instantiate a new revlog from the temporary copy
2044 # instantiate a new revlog from the temporary copy
2027 ui.debug('recreating revlog from the truncated copy\n')
2045 ui.debug('recreating revlog from the truncated copy\n')
2028 vfs = vfsmod.vfs(tmpdir)
2046 vfs = vfsmod.vfs(tmpdir)
2029 vfs.options = getattr(orig.opener, 'options', None)
2047 vfs.options = getattr(orig.opener, 'options', None)
2030
2048
2031 dest = revlog.revlog(vfs,
2049 dest = revlog.revlog(vfs,
2032 indexfile=indexname,
2050 indexfile=indexname,
2033 datafile=dataname)
2051 datafile=dataname)
2034 if dest._inline:
2052 if dest._inline:
2035 raise error.Abort('not supporting inline revlog (yet)')
2053 raise error.Abort('not supporting inline revlog (yet)')
2036 # make sure internals are initialized
2054 # make sure internals are initialized
2037 dest.revision(len(dest) - 1)
2055 dest.revision(len(dest) - 1)
2038 yield dest
2056 yield dest
2039 del dest, vfs
2057 del dest, vfs
2040 finally:
2058 finally:
2041 shutil.rmtree(tmpdir, True)
2059 shutil.rmtree(tmpdir, True)
2042
2060
2043 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2061 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2044 [(b'e', b'engines', b'', b'compression engines to use'),
2062 [(b'e', b'engines', b'', b'compression engines to use'),
2045 (b's', b'startrev', 0, b'revision to start at')],
2063 (b's', b'startrev', 0, b'revision to start at')],
2046 b'-c|-m|FILE')
2064 b'-c|-m|FILE')
2047 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2065 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2048 """Benchmark operations on revlog chunks.
2066 """Benchmark operations on revlog chunks.
2049
2067
2050 Logically, each revlog is a collection of fulltext revisions. However,
2068 Logically, each revlog is a collection of fulltext revisions. However,
2051 stored within each revlog are "chunks" of possibly compressed data. This
2069 stored within each revlog are "chunks" of possibly compressed data. This
2052 data needs to be read and decompressed or compressed and written.
2070 data needs to be read and decompressed or compressed and written.
2053
2071
2054 This command measures the time it takes to read+decompress and recompress
2072 This command measures the time it takes to read+decompress and recompress
2055 chunks in a revlog. It effectively isolates I/O and compression performance.
2073 chunks in a revlog. It effectively isolates I/O and compression performance.
2056 For measurements of higher-level operations like resolving revisions,
2074 For measurements of higher-level operations like resolving revisions,
2057 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2075 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2058 """
2076 """
2059 opts = _byteskwargs(opts)
2077 opts = _byteskwargs(opts)
2060
2078
2061 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2079 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2062
2080
2063 # _chunkraw was renamed to _getsegmentforrevs.
2081 # _chunkraw was renamed to _getsegmentforrevs.
2064 try:
2082 try:
2065 segmentforrevs = rl._getsegmentforrevs
2083 segmentforrevs = rl._getsegmentforrevs
2066 except AttributeError:
2084 except AttributeError:
2067 segmentforrevs = rl._chunkraw
2085 segmentforrevs = rl._chunkraw
2068
2086
2069 # Verify engines argument.
2087 # Verify engines argument.
2070 if engines:
2088 if engines:
2071 engines = set(e.strip() for e in engines.split(b','))
2089 engines = set(e.strip() for e in engines.split(b','))
2072 for engine in engines:
2090 for engine in engines:
2073 try:
2091 try:
2074 util.compressionengines[engine]
2092 util.compressionengines[engine]
2075 except KeyError:
2093 except KeyError:
2076 raise error.Abort(b'unknown compression engine: %s' % engine)
2094 raise error.Abort(b'unknown compression engine: %s' % engine)
2077 else:
2095 else:
2078 engines = []
2096 engines = []
2079 for e in util.compengines:
2097 for e in util.compengines:
2080 engine = util.compengines[e]
2098 engine = util.compengines[e]
2081 try:
2099 try:
2082 if engine.available():
2100 if engine.available():
2083 engine.revlogcompressor().compress(b'dummy')
2101 engine.revlogcompressor().compress(b'dummy')
2084 engines.append(e)
2102 engines.append(e)
2085 except NotImplementedError:
2103 except NotImplementedError:
2086 pass
2104 pass
2087
2105
2088 revs = list(rl.revs(startrev, len(rl) - 1))
2106 revs = list(rl.revs(startrev, len(rl) - 1))
2089
2107
2090 def rlfh(rl):
2108 def rlfh(rl):
2091 if rl._inline:
2109 if rl._inline:
2092 return getsvfs(repo)(rl.indexfile)
2110 return getsvfs(repo)(rl.indexfile)
2093 else:
2111 else:
2094 return getsvfs(repo)(rl.datafile)
2112 return getsvfs(repo)(rl.datafile)
2095
2113
2096 def doread():
2114 def doread():
2097 rl.clearcaches()
2115 rl.clearcaches()
2098 for rev in revs:
2116 for rev in revs:
2099 segmentforrevs(rev, rev)
2117 segmentforrevs(rev, rev)
2100
2118
2101 def doreadcachedfh():
2119 def doreadcachedfh():
2102 rl.clearcaches()
2120 rl.clearcaches()
2103 fh = rlfh(rl)
2121 fh = rlfh(rl)
2104 for rev in revs:
2122 for rev in revs:
2105 segmentforrevs(rev, rev, df=fh)
2123 segmentforrevs(rev, rev, df=fh)
2106
2124
2107 def doreadbatch():
2125 def doreadbatch():
2108 rl.clearcaches()
2126 rl.clearcaches()
2109 segmentforrevs(revs[0], revs[-1])
2127 segmentforrevs(revs[0], revs[-1])
2110
2128
2111 def doreadbatchcachedfh():
2129 def doreadbatchcachedfh():
2112 rl.clearcaches()
2130 rl.clearcaches()
2113 fh = rlfh(rl)
2131 fh = rlfh(rl)
2114 segmentforrevs(revs[0], revs[-1], df=fh)
2132 segmentforrevs(revs[0], revs[-1], df=fh)
2115
2133
2116 def dochunk():
2134 def dochunk():
2117 rl.clearcaches()
2135 rl.clearcaches()
2118 fh = rlfh(rl)
2136 fh = rlfh(rl)
2119 for rev in revs:
2137 for rev in revs:
2120 rl._chunk(rev, df=fh)
2138 rl._chunk(rev, df=fh)
2121
2139
2122 chunks = [None]
2140 chunks = [None]
2123
2141
2124 def dochunkbatch():
2142 def dochunkbatch():
2125 rl.clearcaches()
2143 rl.clearcaches()
2126 fh = rlfh(rl)
2144 fh = rlfh(rl)
2127 # Save chunks as a side-effect.
2145 # Save chunks as a side-effect.
2128 chunks[0] = rl._chunks(revs, df=fh)
2146 chunks[0] = rl._chunks(revs, df=fh)
2129
2147
2130 def docompress(compressor):
2148 def docompress(compressor):
2131 rl.clearcaches()
2149 rl.clearcaches()
2132
2150
2133 try:
2151 try:
2134 # Swap in the requested compression engine.
2152 # Swap in the requested compression engine.
2135 oldcompressor = rl._compressor
2153 oldcompressor = rl._compressor
2136 rl._compressor = compressor
2154 rl._compressor = compressor
2137 for chunk in chunks[0]:
2155 for chunk in chunks[0]:
2138 rl.compress(chunk)
2156 rl.compress(chunk)
2139 finally:
2157 finally:
2140 rl._compressor = oldcompressor
2158 rl._compressor = oldcompressor
2141
2159
2142 benches = [
2160 benches = [
2143 (lambda: doread(), b'read'),
2161 (lambda: doread(), b'read'),
2144 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2162 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2145 (lambda: doreadbatch(), b'read batch'),
2163 (lambda: doreadbatch(), b'read batch'),
2146 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2164 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2147 (lambda: dochunk(), b'chunk'),
2165 (lambda: dochunk(), b'chunk'),
2148 (lambda: dochunkbatch(), b'chunk batch'),
2166 (lambda: dochunkbatch(), b'chunk batch'),
2149 ]
2167 ]
2150
2168
2151 for engine in sorted(engines):
2169 for engine in sorted(engines):
2152 compressor = util.compengines[engine].revlogcompressor()
2170 compressor = util.compengines[engine].revlogcompressor()
2153 benches.append((functools.partial(docompress, compressor),
2171 benches.append((functools.partial(docompress, compressor),
2154 b'compress w/ %s' % engine))
2172 b'compress w/ %s' % engine))
2155
2173
2156 for fn, title in benches:
2174 for fn, title in benches:
2157 timer, fm = gettimer(ui, opts)
2175 timer, fm = gettimer(ui, opts)
2158 timer(fn, title=title)
2176 timer(fn, title=title)
2159 fm.end()
2177 fm.end()
2160
2178
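# Minimal sketch of the compression path that the 'compress w/ <engine>'
# benchmarks above exercise. This is not part of perf.py; it assumes a
# Mercurial installation with the zlib engine available, and the payload is
# an arbitrary example chunk:
#
#   from mercurial import util
#
#   compressor = util.compengines[b'zlib'].revlogcompressor()
#   compressor.compress(b'some revlog chunk payload')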
2161 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2179 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2162 [(b'', b'cache', False, b'use caches instead of clearing')],
2180 [(b'', b'cache', False, b'use caches instead of clearing')],
2163 b'-c|-m|FILE REV')
2181 b'-c|-m|FILE REV')
2164 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2182 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2165 """Benchmark obtaining a revlog revision.
2183 """Benchmark obtaining a revlog revision.
2166
2184
2167 Obtaining a revlog revision consists of roughly the following steps:
2185 Obtaining a revlog revision consists of roughly the following steps:
2168
2186
2169 1. Compute the delta chain
2187 1. Compute the delta chain
2170 2. Slice the delta chain if applicable
2188 2. Slice the delta chain if applicable
2171 3. Obtain the raw chunks for that delta chain
2189 3. Obtain the raw chunks for that delta chain
2172 4. Decompress each raw chunk
2190 4. Decompress each raw chunk
2173 5. Apply binary patches to obtain fulltext
2191 5. Apply binary patches to obtain fulltext
2174 6. Verify hash of fulltext
2192 6. Verify hash of fulltext
2175
2193
2176 This command measures the time spent in each of these phases.
2194 This command measures the time spent in each of these phases.
2177 """
2195 """
2178 opts = _byteskwargs(opts)
2196 opts = _byteskwargs(opts)
2179
2197
2180 if opts.get(b'changelog') or opts.get(b'manifest'):
2198 if opts.get(b'changelog') or opts.get(b'manifest'):
2181 file_, rev = None, file_
2199 file_, rev = None, file_
2182 elif rev is None:
2200 elif rev is None:
2183 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2201 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2184
2202
2185 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2203 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2186
2204
2187 # _chunkraw was renamed to _getsegmentforrevs.
2205 # _chunkraw was renamed to _getsegmentforrevs.
2188 try:
2206 try:
2189 segmentforrevs = r._getsegmentforrevs
2207 segmentforrevs = r._getsegmentforrevs
2190 except AttributeError:
2208 except AttributeError:
2191 segmentforrevs = r._chunkraw
2209 segmentforrevs = r._chunkraw
2192
2210
2193 node = r.lookup(rev)
2211 node = r.lookup(rev)
2194 rev = r.rev(node)
2212 rev = r.rev(node)
2195
2213
2196 def getrawchunks(data, chain):
2214 def getrawchunks(data, chain):
2197 start = r.start
2215 start = r.start
2198 length = r.length
2216 length = r.length
2199 inline = r._inline
2217 inline = r._inline
2200 iosize = r._io.size
2218 iosize = r._io.size
2201 buffer = util.buffer
2219 buffer = util.buffer
2202
2220
2203 chunks = []
2221 chunks = []
2204 ladd = chunks.append
2222 ladd = chunks.append
2205 for idx, item in enumerate(chain):
2223 for idx, item in enumerate(chain):
2206 offset = start(item[0])
2224 offset = start(item[0])
2207 bits = data[idx]
2225 bits = data[idx]
2208 for rev in item:
2226 for rev in item:
2209 chunkstart = start(rev)
2227 chunkstart = start(rev)
2210 if inline:
2228 if inline:
2211 chunkstart += (rev + 1) * iosize
2229 chunkstart += (rev + 1) * iosize
2212 chunklength = length(rev)
2230 chunklength = length(rev)
2213 ladd(buffer(bits, chunkstart - offset, chunklength))
2231 ladd(buffer(bits, chunkstart - offset, chunklength))
2214
2232
2215 return chunks
2233 return chunks
2216
2234
2217 def dodeltachain(rev):
2235 def dodeltachain(rev):
2218 if not cache:
2236 if not cache:
2219 r.clearcaches()
2237 r.clearcaches()
2220 r._deltachain(rev)
2238 r._deltachain(rev)
2221
2239
2222 def doread(chain):
2240 def doread(chain):
2223 if not cache:
2241 if not cache:
2224 r.clearcaches()
2242 r.clearcaches()
2225 for item in slicedchain:
2243 for item in slicedchain:
2226 segmentforrevs(item[0], item[-1])
2244 segmentforrevs(item[0], item[-1])
2227
2245
2228 def doslice(r, chain, size):
2246 def doslice(r, chain, size):
2229 for s in slicechunk(r, chain, targetsize=size):
2247 for s in slicechunk(r, chain, targetsize=size):
2230 pass
2248 pass
2231
2249
2232 def dorawchunks(data, chain):
2250 def dorawchunks(data, chain):
2233 if not cache:
2251 if not cache:
2234 r.clearcaches()
2252 r.clearcaches()
2235 getrawchunks(data, chain)
2253 getrawchunks(data, chain)
2236
2254
2237 def dodecompress(chunks):
2255 def dodecompress(chunks):
2238 decomp = r.decompress
2256 decomp = r.decompress
2239 for chunk in chunks:
2257 for chunk in chunks:
2240 decomp(chunk)
2258 decomp(chunk)
2241
2259
2242 def dopatch(text, bins):
2260 def dopatch(text, bins):
2243 if not cache:
2261 if not cache:
2244 r.clearcaches()
2262 r.clearcaches()
2245 mdiff.patches(text, bins)
2263 mdiff.patches(text, bins)
2246
2264
2247 def dohash(text):
2265 def dohash(text):
2248 if not cache:
2266 if not cache:
2249 r.clearcaches()
2267 r.clearcaches()
2250 r.checkhash(text, node, rev=rev)
2268 r.checkhash(text, node, rev=rev)
2251
2269
2252 def dorevision():
2270 def dorevision():
2253 if not cache:
2271 if not cache:
2254 r.clearcaches()
2272 r.clearcaches()
2255 r.revision(node)
2273 r.revision(node)
2256
2274
2257 try:
2275 try:
2258 from mercurial.revlogutils.deltas import slicechunk
2276 from mercurial.revlogutils.deltas import slicechunk
2259 except ImportError:
2277 except ImportError:
2260 slicechunk = getattr(revlog, '_slicechunk', None)
2278 slicechunk = getattr(revlog, '_slicechunk', None)
2261
2279
2262 size = r.length(rev)
2280 size = r.length(rev)
2263 chain = r._deltachain(rev)[0]
2281 chain = r._deltachain(rev)[0]
2264 if not getattr(r, '_withsparseread', False):
2282 if not getattr(r, '_withsparseread', False):
2265 slicedchain = (chain,)
2283 slicedchain = (chain,)
2266 else:
2284 else:
2267 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2285 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2268 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2286 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2269 rawchunks = getrawchunks(data, slicedchain)
2287 rawchunks = getrawchunks(data, slicedchain)
2270 bins = r._chunks(chain)
2288 bins = r._chunks(chain)
2271 text = bytes(bins[0])
2289 text = bytes(bins[0])
2272 bins = bins[1:]
2290 bins = bins[1:]
2273 text = mdiff.patches(text, bins)
2291 text = mdiff.patches(text, bins)
2274
2292
2275 benches = [
2293 benches = [
2276 (lambda: dorevision(), b'full'),
2294 (lambda: dorevision(), b'full'),
2277 (lambda: dodeltachain(rev), b'deltachain'),
2295 (lambda: dodeltachain(rev), b'deltachain'),
2278 (lambda: doread(chain), b'read'),
2296 (lambda: doread(chain), b'read'),
2279 ]
2297 ]
2280
2298
2281 if getattr(r, '_withsparseread', False):
2299 if getattr(r, '_withsparseread', False):
2282 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2300 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2283 benches.append(slicing)
2301 benches.append(slicing)
2284
2302
2285 benches.extend([
2303 benches.extend([
2286 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2304 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2287 (lambda: dodecompress(rawchunks), b'decompress'),
2305 (lambda: dodecompress(rawchunks), b'decompress'),
2288 (lambda: dopatch(text, bins), b'patch'),
2306 (lambda: dopatch(text, bins), b'patch'),
2289 (lambda: dohash(text), b'hash'),
2307 (lambda: dohash(text), b'hash'),
2290 ])
2308 ])
2291
2309
2292 timer, fm = gettimer(ui, opts)
2310 timer, fm = gettimer(ui, opts)
2293 for fn, title in benches:
2311 for fn, title in benches:
2294 timer(fn, title=title)
2312 timer(fn, title=title)
2295 fm.end()
2313 fm.end()
2296
2314
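# Rough sketch of step 5 above (applying binary patches to rebuild a
# fulltext), using the same mdiff.patches helper that the 'patch' benchmark
# times. Not part of perf.py; the texts are arbitrary examples and
# mdiff.textdiff is only used here to produce a delta to apply:
#
#   from mercurial import mdiff
#
#   base = b'line 1\nline 2\n'
#   delta = mdiff.textdiff(base, b'line 1\nline 2\nline 3\n')
#   fulltext = mdiff.patches(base, [delta])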
2297 @command(b'perfrevset',
2315 @command(b'perfrevset',
2298 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2316 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2299 (b'', b'contexts', False, b'obtain changectx for each revision')]
2317 (b'', b'contexts', False, b'obtain changectx for each revision')]
2300 + formatteropts, b"REVSET")
2318 + formatteropts, b"REVSET")
2301 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2319 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2302 """benchmark the execution time of a revset
2320 """benchmark the execution time of a revset
2303
2321
2304 Use the --clean option if need to evaluate the impact of build volatile
2322 Use the --clean option if need to evaluate the impact of build volatile
2305 revisions set cache on the revset execution. Volatile cache hold filtered
2323 revisions set cache on the revset execution. Volatile cache hold filtered
2306 and obsolete related cache."""
2324 and obsolete related cache."""
2307 opts = _byteskwargs(opts)
2325 opts = _byteskwargs(opts)
2308
2326
2309 timer, fm = gettimer(ui, opts)
2327 timer, fm = gettimer(ui, opts)
2310 def d():
2328 def d():
2311 if clear:
2329 if clear:
2312 repo.invalidatevolatilesets()
2330 repo.invalidatevolatilesets()
2313 if contexts:
2331 if contexts:
2314 for ctx in repo.set(expr): pass
2332 for ctx in repo.set(expr): pass
2315 else:
2333 else:
2316 for r in repo.revs(expr): pass
2334 for r in repo.revs(expr): pass
2317 timer(d)
2335 timer(d)
2318 fm.end()
2336 fm.end()
2319
2337
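# Illustrative invocation of perfrevset (not part of perf.py); the revset
# expression is an arbitrary example:
#
#   $ hg perfrevset --clear --contexts 'draft()'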
2320 @command(b'perfvolatilesets',
2338 @command(b'perfvolatilesets',
2321 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2339 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2322 ] + formatteropts)
2340 ] + formatteropts)
2323 def perfvolatilesets(ui, repo, *names, **opts):
2341 def perfvolatilesets(ui, repo, *names, **opts):
2324 """benchmark the computation of various volatile set
2342 """benchmark the computation of various volatile set
2325
2343
2326 Volatile set computes element related to filtering and obsolescence."""
2344 Volatile set computes element related to filtering and obsolescence."""
2327 opts = _byteskwargs(opts)
2345 opts = _byteskwargs(opts)
2328 timer, fm = gettimer(ui, opts)
2346 timer, fm = gettimer(ui, opts)
2329 repo = repo.unfiltered()
2347 repo = repo.unfiltered()
2330
2348
2331 def getobs(name):
2349 def getobs(name):
2332 def d():
2350 def d():
2333 repo.invalidatevolatilesets()
2351 repo.invalidatevolatilesets()
2334 if opts[b'clear_obsstore']:
2352 if opts[b'clear_obsstore']:
2335 clearfilecache(repo, b'obsstore')
2353 clearfilecache(repo, b'obsstore')
2336 obsolete.getrevs(repo, name)
2354 obsolete.getrevs(repo, name)
2337 return d
2355 return d
2338
2356
2339 allobs = sorted(obsolete.cachefuncs)
2357 allobs = sorted(obsolete.cachefuncs)
2340 if names:
2358 if names:
2341 allobs = [n for n in allobs if n in names]
2359 allobs = [n for n in allobs if n in names]
2342
2360
2343 for name in allobs:
2361 for name in allobs:
2344 timer(getobs(name), title=name)
2362 timer(getobs(name), title=name)
2345
2363
2346 def getfiltered(name):
2364 def getfiltered(name):
2347 def d():
2365 def d():
2348 repo.invalidatevolatilesets()
2366 repo.invalidatevolatilesets()
2349 if opts[b'clear_obsstore']:
2367 if opts[b'clear_obsstore']:
2350 clearfilecache(repo, b'obsstore')
2368 clearfilecache(repo, b'obsstore')
2351 repoview.filterrevs(repo, name)
2369 repoview.filterrevs(repo, name)
2352 return d
2370 return d
2353
2371
2354 allfilter = sorted(repoview.filtertable)
2372 allfilter = sorted(repoview.filtertable)
2355 if names:
2373 if names:
2356 allfilter = [n for n in allfilter if n in names]
2374 allfilter = [n for n in allfilter if n in names]
2357
2375
2358 for name in allfilter:
2376 for name in allfilter:
2359 timer(getfiltered(name), title=name)
2377 timer(getfiltered(name), title=name)
2360 fm.end()
2378 fm.end()
2361
2379
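# Illustrative invocation of perfvolatilesets (not part of perf.py). The
# positional names restrict which sets are timed; 'obsolete' is assumed here
# to be one of the available obsolescence set names:
#
#   $ hg perfvolatilesets --clear-obsstore obsolete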
2362 @command(b'perfbranchmap',
2380 @command(b'perfbranchmap',
2363 [(b'f', b'full', False,
2381 [(b'f', b'full', False,
2364 b'Includes build time of subset'),
2382 b'Includes build time of subset'),
2365 (b'', b'clear-revbranch', False,
2383 (b'', b'clear-revbranch', False,
2366 b'purge the revbranch cache between computation'),
2384 b'purge the revbranch cache between computation'),
2367 ] + formatteropts)
2385 ] + formatteropts)
2368 def perfbranchmap(ui, repo, *filternames, **opts):
2386 def perfbranchmap(ui, repo, *filternames, **opts):
2369 """benchmark the update of a branchmap
2387 """benchmark the update of a branchmap
2370
2388
2371 This benchmarks the full repo.branchmap() call with read and write disabled
2389 This benchmarks the full repo.branchmap() call with read and write disabled
2372 """
2390 """
2373 opts = _byteskwargs(opts)
2391 opts = _byteskwargs(opts)
2374 full = opts.get(b"full", False)
2392 full = opts.get(b"full", False)
2375 clear_revbranch = opts.get(b"clear_revbranch", False)
2393 clear_revbranch = opts.get(b"clear_revbranch", False)
2376 timer, fm = gettimer(ui, opts)
2394 timer, fm = gettimer(ui, opts)
2377 def getbranchmap(filtername):
2395 def getbranchmap(filtername):
2378 """generate a benchmark function for the filtername"""
2396 """generate a benchmark function for the filtername"""
2379 if filtername is None:
2397 if filtername is None:
2380 view = repo
2398 view = repo
2381 else:
2399 else:
2382 view = repo.filtered(filtername)
2400 view = repo.filtered(filtername)
2383 if util.safehasattr(view._branchcaches, '_per_filter'):
2401 if util.safehasattr(view._branchcaches, '_per_filter'):
2384 filtered = view._branchcaches._per_filter
2402 filtered = view._branchcaches._per_filter
2385 else:
2403 else:
2386 # older versions
2404 # older versions
2387 filtered = view._branchcaches
2405 filtered = view._branchcaches
2388 def d():
2406 def d():
2389 if clear_revbranch:
2407 if clear_revbranch:
2390 repo.revbranchcache()._clear()
2408 repo.revbranchcache()._clear()
2391 if full:
2409 if full:
2392 view._branchcaches.clear()
2410 view._branchcaches.clear()
2393 else:
2411 else:
2394 filtered.pop(filtername, None)
2412 filtered.pop(filtername, None)
2395 view.branchmap()
2413 view.branchmap()
2396 return d
2414 return d
2397 # add filters in order, from smaller subset to bigger subset
2415 # add filters in order, from smaller subset to bigger subset
2398 possiblefilters = set(repoview.filtertable)
2416 possiblefilters = set(repoview.filtertable)
2399 if filternames:
2417 if filternames:
2400 possiblefilters &= set(filternames)
2418 possiblefilters &= set(filternames)
2401 subsettable = getbranchmapsubsettable()
2419 subsettable = getbranchmapsubsettable()
2402 allfilters = []
2420 allfilters = []
2403 while possiblefilters:
2421 while possiblefilters:
2404 for name in possiblefilters:
2422 for name in possiblefilters:
2405 subset = subsettable.get(name)
2423 subset = subsettable.get(name)
2406 if subset not in possiblefilters:
2424 if subset not in possiblefilters:
2407 break
2425 break
2408 else:
2426 else:
2409 assert False, b'subset cycle %s!' % possiblefilters
2427 assert False, b'subset cycle %s!' % possiblefilters
2410 allfilters.append(name)
2428 allfilters.append(name)
2411 possiblefilters.remove(name)
2429 possiblefilters.remove(name)
2412
2430
2413 # warm the cache
2431 # warm the cache
2414 if not full:
2432 if not full:
2415 for name in allfilters:
2433 for name in allfilters:
2416 repo.filtered(name).branchmap()
2434 repo.filtered(name).branchmap()
2417 if not filternames or b'unfiltered' in filternames:
2435 if not filternames or b'unfiltered' in filternames:
2418 # add unfiltered
2436 # add unfiltered
2419 allfilters.append(None)
2437 allfilters.append(None)
2420
2438
2421 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2439 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2422 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2440 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2423 branchcacheread.set(classmethod(lambda *args: None))
2441 branchcacheread.set(classmethod(lambda *args: None))
2424 else:
2442 else:
2425 # older versions
2443 # older versions
2426 branchcacheread = safeattrsetter(branchmap, b'read')
2444 branchcacheread = safeattrsetter(branchmap, b'read')
2427 branchcacheread.set(lambda *args: None)
2445 branchcacheread.set(lambda *args: None)
2428 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2446 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2429 branchcachewrite.set(lambda *args: None)
2447 branchcachewrite.set(lambda *args: None)
2430 try:
2448 try:
2431 for name in allfilters:
2449 for name in allfilters:
2432 printname = name
2450 printname = name
2433 if name is None:
2451 if name is None:
2434 printname = b'unfiltered'
2452 printname = b'unfiltered'
2435 timer(getbranchmap(name), title=str(printname))
2453 timer(getbranchmap(name), title=str(printname))
2436 finally:
2454 finally:
2437 branchcacheread.restore()
2455 branchcacheread.restore()
2438 branchcachewrite.restore()
2456 branchcachewrite.restore()
2439 fm.end()
2457 fm.end()
2440
2458
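# Illustrative invocation of perfbranchmap (not part of perf.py). The filter
# names are assumed examples of repoview filters; with no names given, every
# known filter (plus the unfiltered view) is benchmarked:
#
#   $ hg perfbranchmap --full visible served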
2441 @command(b'perfbranchmapupdate', [
2459 @command(b'perfbranchmapupdate', [
2442 (b'', b'base', [], b'subset of revisions to start from'),
2460 (b'', b'base', [], b'subset of revisions to start from'),
2443 (b'', b'target', [], b'subset of revisions to end with'),
2461 (b'', b'target', [], b'subset of revisions to end with'),
2444 (b'', b'clear-caches', False, b'clear caches between each run')
2462 (b'', b'clear-caches', False, b'clear caches between each run')
2445 ] + formatteropts)
2463 ] + formatteropts)
2446 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2464 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2447 """benchmark branchmap update from for <base> revs to <target> revs
2465 """benchmark branchmap update from for <base> revs to <target> revs
2448
2466
2449 If `--clear-caches` is passed, the following items will be reset before
2467 If `--clear-caches` is passed, the following items will be reset before
2450 each update:
2468 each update:
2451 * the changelog instance and associated indexes
2469 * the changelog instance and associated indexes
2452 * the rev-branch-cache instance
2470 * the rev-branch-cache instance
2453
2471
2454 Examples:
2472 Examples:
2455
2473
2456 # update for the last revision only
2474 # update for the last revision only
2457 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2475 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2458
2476
2459 # update for a change coming with a new branch
2477 # update for a change coming with a new branch
2460 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2478 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2461 """
2479 """
2462 from mercurial import branchmap
2480 from mercurial import branchmap
2463 from mercurial import repoview
2481 from mercurial import repoview
2464 opts = _byteskwargs(opts)
2482 opts = _byteskwargs(opts)
2465 timer, fm = gettimer(ui, opts)
2483 timer, fm = gettimer(ui, opts)
2466 clearcaches = opts[b'clear_caches']
2484 clearcaches = opts[b'clear_caches']
2467 unfi = repo.unfiltered()
2485 unfi = repo.unfiltered()
2468 x = [None] # used to pass data between closures
2486 x = [None] # used to pass data between closures
2469
2487
2470 # we use a `list` here to avoid possible side effects from smartset
2488 # we use a `list` here to avoid possible side effects from smartset
2471 baserevs = list(scmutil.revrange(repo, base))
2489 baserevs = list(scmutil.revrange(repo, base))
2472 targetrevs = list(scmutil.revrange(repo, target))
2490 targetrevs = list(scmutil.revrange(repo, target))
2473 if not baserevs:
2491 if not baserevs:
2474 raise error.Abort(b'no revisions selected for --base')
2492 raise error.Abort(b'no revisions selected for --base')
2475 if not targetrevs:
2493 if not targetrevs:
2476 raise error.Abort(b'no revisions selected for --target')
2494 raise error.Abort(b'no revisions selected for --target')
2477
2495
2478 # make sure the target branchmap also contains the one in the base
2496 # make sure the target branchmap also contains the one in the base
2479 targetrevs = list(set(baserevs) | set(targetrevs))
2497 targetrevs = list(set(baserevs) | set(targetrevs))
2480 targetrevs.sort()
2498 targetrevs.sort()
2481
2499
2482 cl = repo.changelog
2500 cl = repo.changelog
2483 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2501 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2484 allbaserevs.sort()
2502 allbaserevs.sort()
2485 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2503 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2486
2504
2487 newrevs = list(alltargetrevs.difference(allbaserevs))
2505 newrevs = list(alltargetrevs.difference(allbaserevs))
2488 newrevs.sort()
2506 newrevs.sort()
2489
2507
2490 allrevs = frozenset(unfi.changelog.revs())
2508 allrevs = frozenset(unfi.changelog.revs())
2491 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2509 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2492 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2510 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2493
2511
2494 def basefilter(repo, visibilityexceptions=None):
2512 def basefilter(repo, visibilityexceptions=None):
2495 return basefilterrevs
2513 return basefilterrevs
2496
2514
2497 def targetfilter(repo, visibilityexceptions=None):
2515 def targetfilter(repo, visibilityexceptions=None):
2498 return targetfilterrevs
2516 return targetfilterrevs
2499
2517
2500 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2518 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2501 ui.status(msg % (len(allbaserevs), len(newrevs)))
2519 ui.status(msg % (len(allbaserevs), len(newrevs)))
2502 if targetfilterrevs:
2520 if targetfilterrevs:
2503 msg = b'(%d revisions still filtered)\n'
2521 msg = b'(%d revisions still filtered)\n'
2504 ui.status(msg % len(targetfilterrevs))
2522 ui.status(msg % len(targetfilterrevs))
2505
2523
2506 try:
2524 try:
2507 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2525 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2508 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2526 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2509
2527
2510 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2528 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2511 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2529 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2512
2530
2513 # try to find an existing branchmap to reuse
2531 # try to find an existing branchmap to reuse
2514 subsettable = getbranchmapsubsettable()
2532 subsettable = getbranchmapsubsettable()
2515 candidatefilter = subsettable.get(None)
2533 candidatefilter = subsettable.get(None)
2516 while candidatefilter is not None:
2534 while candidatefilter is not None:
2517 candidatebm = repo.filtered(candidatefilter).branchmap()
2535 candidatebm = repo.filtered(candidatefilter).branchmap()
2518 if candidatebm.validfor(baserepo):
2536 if candidatebm.validfor(baserepo):
2519 filtered = repoview.filterrevs(repo, candidatefilter)
2537 filtered = repoview.filterrevs(repo, candidatefilter)
2520 missing = [r for r in allbaserevs if r in filtered]
2538 missing = [r for r in allbaserevs if r in filtered]
2521 base = candidatebm.copy()
2539 base = candidatebm.copy()
2522 base.update(baserepo, missing)
2540 base.update(baserepo, missing)
2523 break
2541 break
2524 candidatefilter = subsettable.get(candidatefilter)
2542 candidatefilter = subsettable.get(candidatefilter)
2525 else:
2543 else:
2526 # no suitable subset was found
2544 # no suitable subset was found
2527 base = branchmap.branchcache()
2545 base = branchmap.branchcache()
2528 base.update(baserepo, allbaserevs)
2546 base.update(baserepo, allbaserevs)
2529
2547
2530 def setup():
2548 def setup():
2531 x[0] = base.copy()
2549 x[0] = base.copy()
2532 if clearcaches:
2550 if clearcaches:
2533 unfi._revbranchcache = None
2551 unfi._revbranchcache = None
2534 clearchangelog(repo)
2552 clearchangelog(repo)
2535
2553
2536 def bench():
2554 def bench():
2537 x[0].update(targetrepo, newrevs)
2555 x[0].update(targetrepo, newrevs)
2538
2556
2539 timer(bench, setup=setup)
2557 timer(bench, setup=setup)
2540 fm.end()
2558 fm.end()
2541 finally:
2559 finally:
2542 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2560 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2543 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2561 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2544
2562
2545 @command(b'perfbranchmapload', [
2563 @command(b'perfbranchmapload', [
2546 (b'f', b'filter', b'', b'Specify repoview filter'),
2564 (b'f', b'filter', b'', b'Specify repoview filter'),
2547 (b'', b'list', False, b'List branchmap filter caches'),
2565 (b'', b'list', False, b'List branchmap filter caches'),
2548 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2566 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2549
2567
2550 ] + formatteropts)
2568 ] + formatteropts)
2551 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2569 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2552 """benchmark reading the branchmap"""
2570 """benchmark reading the branchmap"""
2553 opts = _byteskwargs(opts)
2571 opts = _byteskwargs(opts)
2554 clearrevlogs = opts[b'clear_revlogs']
2572 clearrevlogs = opts[b'clear_revlogs']
2555
2573
2556 if list:
2574 if list:
2557 for name, kind, st in repo.cachevfs.readdir(stat=True):
2575 for name, kind, st in repo.cachevfs.readdir(stat=True):
2558 if name.startswith(b'branch2'):
2576 if name.startswith(b'branch2'):
2559 filtername = name.partition(b'-')[2] or b'unfiltered'
2577 filtername = name.partition(b'-')[2] or b'unfiltered'
2560 ui.status(b'%s - %s\n'
2578 ui.status(b'%s - %s\n'
2561 % (filtername, util.bytecount(st.st_size)))
2579 % (filtername, util.bytecount(st.st_size)))
2562 return
2580 return
2563 if not filter:
2581 if not filter:
2564 filter = None
2582 filter = None
2565 subsettable = getbranchmapsubsettable()
2583 subsettable = getbranchmapsubsettable()
2566 if filter is None:
2584 if filter is None:
2567 repo = repo.unfiltered()
2585 repo = repo.unfiltered()
2568 else:
2586 else:
2569 repo = repoview.repoview(repo, filter)
2587 repo = repoview.repoview(repo, filter)
2570
2588
2571 repo.branchmap() # make sure we have a relevant, up to date branchmap
2589 repo.branchmap() # make sure we have a relevant, up to date branchmap
2572
2590
2573 try:
2591 try:
2574 fromfile = branchmap.branchcache.fromfile
2592 fromfile = branchmap.branchcache.fromfile
2575 except AttributeError:
2593 except AttributeError:
2576 # older versions
2594 # older versions
2577 fromfile = branchmap.read
2595 fromfile = branchmap.read
2578
2596
2579 currentfilter = filter
2597 currentfilter = filter
2580 # try once without timer, the filter may not be cached
2598 # try once without timer, the filter may not be cached
2581 while fromfile(repo) is None:
2599 while fromfile(repo) is None:
2582 currentfilter = subsettable.get(currentfilter)
2600 currentfilter = subsettable.get(currentfilter)
2583 if currentfilter is None:
2601 if currentfilter is None:
2584 raise error.Abort(b'No branchmap cached for %s repo'
2602 raise error.Abort(b'No branchmap cached for %s repo'
2585 % (filter or b'unfiltered'))
2603 % (filter or b'unfiltered'))
2586 repo = repo.filtered(currentfilter)
2604 repo = repo.filtered(currentfilter)
2587 timer, fm = gettimer(ui, opts)
2605 timer, fm = gettimer(ui, opts)
2588 def setup():
2606 def setup():
2589 if clearrevlogs:
2607 if clearrevlogs:
2590 clearchangelog(repo)
2608 clearchangelog(repo)
2591 def bench():
2609 def bench():
2592 fromfile(repo)
2610 fromfile(repo)
2593 timer(bench, setup=setup)
2611 timer(bench, setup=setup)
2594 fm.end()
2612 fm.end()
2595
2613
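# Illustrative invocations of perfbranchmapload (not part of perf.py):
#
#   $ hg perfbranchmapload --list             # show on-disk branchmap caches
#   $ hg perfbranchmapload --filter served    # time loading one of them
#
# The 'served' filter name is an assumed example; --list prints the branchmap
# cache files actually present, with their filter name and size.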
2596 @command(b'perfloadmarkers')
2614 @command(b'perfloadmarkers')
2597 def perfloadmarkers(ui, repo):
2615 def perfloadmarkers(ui, repo):
2598 """benchmark the time to parse the on-disk markers for a repo
2616 """benchmark the time to parse the on-disk markers for a repo
2599
2617
2600 Result is the number of markers in the repo."""
2618 Result is the number of markers in the repo."""
2601 timer, fm = gettimer(ui)
2619 timer, fm = gettimer(ui)
2602 svfs = getsvfs(repo)
2620 svfs = getsvfs(repo)
2603 timer(lambda: len(obsolete.obsstore(svfs)))
2621 timer(lambda: len(obsolete.obsstore(svfs)))
2604 fm.end()
2622 fm.end()
2605
2623
2606 @command(b'perflrucachedict', formatteropts +
2624 @command(b'perflrucachedict', formatteropts +
2607 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2625 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2608 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2626 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2609 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2627 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2610 (b'', b'size', 4, b'size of cache'),
2628 (b'', b'size', 4, b'size of cache'),
2611 (b'', b'gets', 10000, b'number of key lookups'),
2629 (b'', b'gets', 10000, b'number of key lookups'),
2612 (b'', b'sets', 10000, b'number of key sets'),
2630 (b'', b'sets', 10000, b'number of key sets'),
2613 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2631 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2614 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2632 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2615 norepo=True)
2633 norepo=True)
2616 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2634 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2617 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2635 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2618 opts = _byteskwargs(opts)
2636 opts = _byteskwargs(opts)
2619
2637
2620 def doinit():
2638 def doinit():
2621 for i in _xrange(10000):
2639 for i in _xrange(10000):
2622 util.lrucachedict(size)
2640 util.lrucachedict(size)
2623
2641
2624 costrange = list(range(mincost, maxcost + 1))
2642 costrange = list(range(mincost, maxcost + 1))
2625
2643
2626 values = []
2644 values = []
2627 for i in _xrange(size):
2645 for i in _xrange(size):
2628 values.append(random.randint(0, _maxint))
2646 values.append(random.randint(0, _maxint))
2629
2647
2630 # Get mode fills the cache and tests raw lookup performance with no
2648 # Get mode fills the cache and tests raw lookup performance with no
2631 # eviction.
2649 # eviction.
2632 getseq = []
2650 getseq = []
2633 for i in _xrange(gets):
2651 for i in _xrange(gets):
2634 getseq.append(random.choice(values))
2652 getseq.append(random.choice(values))
2635
2653
2636 def dogets():
2654 def dogets():
2637 d = util.lrucachedict(size)
2655 d = util.lrucachedict(size)
2638 for v in values:
2656 for v in values:
2639 d[v] = v
2657 d[v] = v
2640 for key in getseq:
2658 for key in getseq:
2641 value = d[key]
2659 value = d[key]
2642 value # silence pyflakes warning
2660 value # silence pyflakes warning
2643
2661
2644 def dogetscost():
2662 def dogetscost():
2645 d = util.lrucachedict(size, maxcost=costlimit)
2663 d = util.lrucachedict(size, maxcost=costlimit)
2646 for i, v in enumerate(values):
2664 for i, v in enumerate(values):
2647 d.insert(v, v, cost=costs[i])
2665 d.insert(v, v, cost=costs[i])
2648 for key in getseq:
2666 for key in getseq:
2649 try:
2667 try:
2650 value = d[key]
2668 value = d[key]
2651 value # silence pyflakes warning
2669 value # silence pyflakes warning
2652 except KeyError:
2670 except KeyError:
2653 pass
2671 pass
2654
2672
2655 # Set mode tests insertion speed with cache eviction.
2673 # Set mode tests insertion speed with cache eviction.
2656 setseq = []
2674 setseq = []
2657 costs = []
2675 costs = []
2658 for i in _xrange(sets):
2676 for i in _xrange(sets):
2659 setseq.append(random.randint(0, _maxint))
2677 setseq.append(random.randint(0, _maxint))
2660 costs.append(random.choice(costrange))
2678 costs.append(random.choice(costrange))
2661
2679
2662 def doinserts():
2680 def doinserts():
2663 d = util.lrucachedict(size)
2681 d = util.lrucachedict(size)
2664 for v in setseq:
2682 for v in setseq:
2665 d.insert(v, v)
2683 d.insert(v, v)
2666
2684
2667 def doinsertscost():
2685 def doinsertscost():
2668 d = util.lrucachedict(size, maxcost=costlimit)
2686 d = util.lrucachedict(size, maxcost=costlimit)
2669 for i, v in enumerate(setseq):
2687 for i, v in enumerate(setseq):
2670 d.insert(v, v, cost=costs[i])
2688 d.insert(v, v, cost=costs[i])
2671
2689
2672 def dosets():
2690 def dosets():
2673 d = util.lrucachedict(size)
2691 d = util.lrucachedict(size)
2674 for v in setseq:
2692 for v in setseq:
2675 d[v] = v
2693 d[v] = v
2676
2694
2677 # Mixed mode randomly performs gets and sets with eviction.
2695 # Mixed mode randomly performs gets and sets with eviction.
2678 mixedops = []
2696 mixedops = []
2679 for i in _xrange(mixed):
2697 for i in _xrange(mixed):
2680 r = random.randint(0, 100)
2698 r = random.randint(0, 100)
2681 if r < mixedgetfreq:
2699 if r < mixedgetfreq:
2682 op = 0
2700 op = 0
2683 else:
2701 else:
2684 op = 1
2702 op = 1
2685
2703
2686 mixedops.append((op,
2704 mixedops.append((op,
2687 random.randint(0, size * 2),
2705 random.randint(0, size * 2),
2688 random.choice(costrange)))
2706 random.choice(costrange)))
2689
2707
2690 def domixed():
2708 def domixed():
2691 d = util.lrucachedict(size)
2709 d = util.lrucachedict(size)
2692
2710
2693 for op, v, cost in mixedops:
2711 for op, v, cost in mixedops:
2694 if op == 0:
2712 if op == 0:
2695 try:
2713 try:
2696 d[v]
2714 d[v]
2697 except KeyError:
2715 except KeyError:
2698 pass
2716 pass
2699 else:
2717 else:
2700 d[v] = v
2718 d[v] = v
2701
2719
2702 def domixedcost():
2720 def domixedcost():
2703 d = util.lrucachedict(size, maxcost=costlimit)
2721 d = util.lrucachedict(size, maxcost=costlimit)
2704
2722
2705 for op, v, cost in mixedops:
2723 for op, v, cost in mixedops:
2706 if op == 0:
2724 if op == 0:
2707 try:
2725 try:
2708 d[v]
2726 d[v]
2709 except KeyError:
2727 except KeyError:
2710 pass
2728 pass
2711 else:
2729 else:
2712 d.insert(v, v, cost=cost)
2730 d.insert(v, v, cost=cost)
2713
2731
2714 benches = [
2732 benches = [
2715 (doinit, b'init'),
2733 (doinit, b'init'),
2716 ]
2734 ]
2717
2735
2718 if costlimit:
2736 if costlimit:
2719 benches.extend([
2737 benches.extend([
2720 (dogetscost, b'gets w/ cost limit'),
2738 (dogetscost, b'gets w/ cost limit'),
2721 (doinsertscost, b'inserts w/ cost limit'),
2739 (doinsertscost, b'inserts w/ cost limit'),
2722 (domixedcost, b'mixed w/ cost limit'),
2740 (domixedcost, b'mixed w/ cost limit'),
2723 ])
2741 ])
2724 else:
2742 else:
2725 benches.extend([
2743 benches.extend([
2726 (dogets, b'gets'),
2744 (dogets, b'gets'),
2727 (doinserts, b'inserts'),
2745 (doinserts, b'inserts'),
2728 (dosets, b'sets'),
2746 (dosets, b'sets'),
2729 (domixed, b'mixed')
2747 (domixed, b'mixed')
2730 ])
2748 ])
2731
2749
2732 for fn, title in benches:
2750 for fn, title in benches:
2733 timer, fm = gettimer(ui, opts)
2751 timer, fm = gettimer(ui, opts)
2734 timer(fn, title=title)
2752 timer(fn, title=title)
2735 fm.end()
2753 fm.end()
2736
2754
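The benchmark above only times the cache; for readers unfamiliar with the API it exercises, here is a minimal sketch of util.lrucachedict as used by perflrucachedict. The capacity, cost limit and costs below are arbitrary example values, not defaults taken from the extension.

    from mercurial import util

    def lrucachedict_sketch():
        # capacity-bound cache: least-recently-used entries are evicted once
        # more than 4 keys have been stored
        d = util.lrucachedict(4)
        for v in range(10):
            d[v] = v                      # the pattern timed by dosets()
        try:
            d[0]                          # misses raise KeyError, as in dogets()
        except KeyError:
            pass

        # cost-aware cache: inserts carry an explicit cost and eviction also
        # honours the maxcost ceiling
        dc = util.lrucachedict(4, maxcost=100)
        for v in range(10):
            dc.insert(v, v, cost=25)      # the pattern timed by doinsertscost()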
2737 @command(b'perfwrite', formatteropts)
2755 @command(b'perfwrite', formatteropts)
2738 def perfwrite(ui, repo, **opts):
2756 def perfwrite(ui, repo, **opts):
2739 """microbenchmark ui.write
2757 """microbenchmark ui.write
2740 """
2758 """
2741 opts = _byteskwargs(opts)
2759 opts = _byteskwargs(opts)
2742
2760
2743 timer, fm = gettimer(ui, opts)
2761 timer, fm = gettimer(ui, opts)
2744 def write():
2762 def write():
2745 for i in range(100000):
2763 for i in range(100000):
2746 ui.write((b'Testing write performance\n'))
2764 ui.write((b'Testing write performance\n'))
2747 timer(write)
2765 timer(write)
2748 fm.end()
2766 fm.end()
2749
2767
2750 def uisetup(ui):
2768 def uisetup(ui):
2751 if (util.safehasattr(cmdutil, b'openrevlog') and
2769 if (util.safehasattr(cmdutil, b'openrevlog') and
2752 not util.safehasattr(commands, b'debugrevlogopts')):
2770 not util.safehasattr(commands, b'debugrevlogopts')):
2753 # for "historical portability":
2771 # for "historical portability":
2754 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2772 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2755 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2773 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2756 # openrevlog() should cause failure, because it has been
2774 # openrevlog() should cause failure, because it has been
2757 # available since 3.5 (or 49c583ca48c4).
2775 # available since 3.5 (or 49c583ca48c4).
2758 def openrevlog(orig, repo, cmd, file_, opts):
2776 def openrevlog(orig, repo, cmd, file_, opts):
2759 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2777 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2760 raise error.Abort(b"This version doesn't support --dir option",
2778 raise error.Abort(b"This version doesn't support --dir option",
2761 hint=b"use 3.5 or later")
2779 hint=b"use 3.5 or later")
2762 return orig(repo, cmd, file_, opts)
2780 return orig(repo, cmd, file_, opts)
2763 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2781 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2764
2782
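As a side note on the compatibility comment above: the version detection is pure feature sniffing rather than a version-number check. A minimal sketch of the same idiom, using the very attribute names uisetup() probes:

    from mercurial import cmdutil, commands, util

    # True only on the Mercurial range described above (roughly 1.9 - 3.7),
    # where cmdutil.openrevlog exists but commands.debugrevlogopts does not
    needswrapper = (util.safehasattr(cmdutil, b'openrevlog')
                    and not util.safehasattr(commands, b'debugrevlogopts'))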
2765 @command(b'perfprogress', formatteropts + [
2783 @command(b'perfprogress', formatteropts + [
2766 (b'', b'topic', b'topic', b'topic for progress messages'),
2784 (b'', b'topic', b'topic', b'topic for progress messages'),
2767 (b'c', b'total', 1000000, b'total value we are progressing to'),
2785 (b'c', b'total', 1000000, b'total value we are progressing to'),
2768 ], norepo=True)
2786 ], norepo=True)
2769 def perfprogress(ui, topic=None, total=None, **opts):
2787 def perfprogress(ui, topic=None, total=None, **opts):
2770 """printing of progress bars"""
2788 """printing of progress bars"""
2771 opts = _byteskwargs(opts)
2789 opts = _byteskwargs(opts)
2772
2790
2773 timer, fm = gettimer(ui, opts)
2791 timer, fm = gettimer(ui, opts)
2774
2792
2775 def doprogress():
2793 def doprogress():
2776 with ui.makeprogress(topic, total=total) as progress:
2794 with ui.makeprogress(topic, total=total) as progress:
2777 for i in pycompat.xrange(total):
2795 for i in pycompat.xrange(total):
2778 progress.increment()
2796 progress.increment()
2779
2797
2780 timer(doprogress)
2798 timer(doprogress)
2781 fm.end()
2799 fm.end()
@@ -1,302 +1,320
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
45 ==============
46
47 "perf"
48 ------
49
50 "all-timing"
51 When set, additional statistic will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
53 (default: off).
54
55 "presleep"
56 number of second to wait before any group of run (default: 1)
57
58 "stub"
59 When set, benchmark will only be run once, useful for testing (default:
60 off)
61
44 list of commands:
62 list of commands:
45
63
46 perfaddremove
64 perfaddremove
47 (no help text available)
65 (no help text available)
48 perfancestors
66 perfancestors
49 (no help text available)
67 (no help text available)
50 perfancestorset
68 perfancestorset
51 (no help text available)
69 (no help text available)
52 perfannotate (no help text available)
70 perfannotate (no help text available)
53 perfbdiff benchmark a bdiff between revisions
71 perfbdiff benchmark a bdiff between revisions
54 perfbookmarks
72 perfbookmarks
55 benchmark parsing bookmarks from disk to memory
73 benchmark parsing bookmarks from disk to memory
56 perfbranchmap
74 perfbranchmap
57 benchmark the update of a branchmap
75 benchmark the update of a branchmap
58 perfbranchmapload
76 perfbranchmapload
59 benchmark reading the branchmap
77 benchmark reading the branchmap
60 perfbranchmapupdate
78 perfbranchmapupdate
61 benchmark branchmap update from for <base> revs to <target>
79 benchmark branchmap update from for <base> revs to <target>
62 revs
80 revs
63 perfbundleread
81 perfbundleread
64 Benchmark reading of bundle files.
82 Benchmark reading of bundle files.
65 perfcca (no help text available)
83 perfcca (no help text available)
66 perfchangegroupchangelog
84 perfchangegroupchangelog
67 Benchmark producing a changelog group for a changegroup.
85 Benchmark producing a changelog group for a changegroup.
68 perfchangeset
86 perfchangeset
69 (no help text available)
87 (no help text available)
70 perfctxfiles (no help text available)
88 perfctxfiles (no help text available)
71 perfdiffwd Profile diff of working directory changes
89 perfdiffwd Profile diff of working directory changes
72 perfdirfoldmap
90 perfdirfoldmap
73 (no help text available)
91 (no help text available)
74 perfdirs (no help text available)
92 perfdirs (no help text available)
75 perfdirstate (no help text available)
93 perfdirstate (no help text available)
76 perfdirstatedirs
94 perfdirstatedirs
77 (no help text available)
95 (no help text available)
78 perfdirstatefoldmap
96 perfdirstatefoldmap
79 (no help text available)
97 (no help text available)
80 perfdirstatewrite
98 perfdirstatewrite
81 (no help text available)
99 (no help text available)
82 perfdiscovery
100 perfdiscovery
83 benchmark discovery between local repo and the peer at given
101 benchmark discovery between local repo and the peer at given
84 path
102 path
85 perffncacheencode
103 perffncacheencode
86 (no help text available)
104 (no help text available)
87 perffncacheload
105 perffncacheload
88 (no help text available)
106 (no help text available)
89 perffncachewrite
107 perffncachewrite
90 (no help text available)
108 (no help text available)
91 perfheads benchmark the computation of a changelog heads
109 perfheads benchmark the computation of a changelog heads
92 perfhelper-pathcopies
110 perfhelper-pathcopies
93 find statistic about potential parameters for the
111 find statistic about potential parameters for the
94 'perftracecopies'
112 'perftracecopies'
95 perfignore benchmark operation related to computing ignore
113 perfignore benchmark operation related to computing ignore
96 perfindex benchmark index creation time followed by a lookup
114 perfindex benchmark index creation time followed by a lookup
97 perflinelogedits
115 perflinelogedits
98 (no help text available)
116 (no help text available)
99 perfloadmarkers
117 perfloadmarkers
100 benchmark the time to parse the on-disk markers for a repo
118 benchmark the time to parse the on-disk markers for a repo
101 perflog (no help text available)
119 perflog (no help text available)
102 perflookup (no help text available)
120 perflookup (no help text available)
103 perflrucachedict
121 perflrucachedict
104 (no help text available)
122 (no help text available)
105 perfmanifest benchmark the time to read a manifest from disk and return a
123 perfmanifest benchmark the time to read a manifest from disk and return a
106 usable
124 usable
107 perfmergecalculate
125 perfmergecalculate
108 (no help text available)
126 (no help text available)
109 perfmoonwalk benchmark walking the changelog backwards
127 perfmoonwalk benchmark walking the changelog backwards
110 perfnodelookup
128 perfnodelookup
111 (no help text available)
129 (no help text available)
112 perfnodemap benchmark the time necessary to look up revision from a cold
130 perfnodemap benchmark the time necessary to look up revision from a cold
113 nodemap
131 nodemap
114 perfparents (no help text available)
132 perfparents (no help text available)
115 perfpathcopies
133 perfpathcopies
116 benchmark the copy tracing logic
134 benchmark the copy tracing logic
117 perfphases benchmark phasesets computation
135 perfphases benchmark phasesets computation
118 perfphasesremote
136 perfphasesremote
119 benchmark time needed to analyse phases of the remote server
137 benchmark time needed to analyse phases of the remote server
120 perfprogress printing of progress bars
138 perfprogress printing of progress bars
121 perfrawfiles (no help text available)
139 perfrawfiles (no help text available)
122 perfrevlogchunks
140 perfrevlogchunks
123 Benchmark operations on revlog chunks.
141 Benchmark operations on revlog chunks.
124 perfrevlogindex
142 perfrevlogindex
125 Benchmark operations against a revlog index.
143 Benchmark operations against a revlog index.
126 perfrevlogrevision
144 perfrevlogrevision
127 Benchmark obtaining a revlog revision.
145 Benchmark obtaining a revlog revision.
128 perfrevlogrevisions
146 perfrevlogrevisions
129 Benchmark reading a series of revisions from a revlog.
147 Benchmark reading a series of revisions from a revlog.
130 perfrevlogwrite
148 perfrevlogwrite
131 Benchmark writing a series of revisions to a revlog.
149 Benchmark writing a series of revisions to a revlog.
132 perfrevrange (no help text available)
150 perfrevrange (no help text available)
133 perfrevset benchmark the execution time of a revset
151 perfrevset benchmark the execution time of a revset
134 perfstartup (no help text available)
152 perfstartup (no help text available)
135 perfstatus (no help text available)
153 perfstatus (no help text available)
136 perftags (no help text available)
154 perftags (no help text available)
137 perftemplating
155 perftemplating
138 test the rendering time of a given template
156 test the rendering time of a given template
139 perfunidiff benchmark a unified diff between revisions
157 perfunidiff benchmark a unified diff between revisions
140 perfvolatilesets
158 perfvolatilesets
141 benchmark the computation of various volatile set
159 benchmark the computation of various volatile set
142 perfwalk (no help text available)
160 perfwalk (no help text available)
143 perfwrite microbenchmark ui.write
161 perfwrite microbenchmark ui.write
144
162
145 (use 'hg help -v perf' to show built-in aliases and global options)
163 (use 'hg help -v perf' to show built-in aliases and global options)
146 $ hg perfaddremove
164 $ hg perfaddremove
147 $ hg perfancestors
165 $ hg perfancestors
148 $ hg perfancestorset 2
166 $ hg perfancestorset 2
149 $ hg perfannotate a
167 $ hg perfannotate a
150 $ hg perfbdiff -c 1
168 $ hg perfbdiff -c 1
151 $ hg perfbdiff --alldata 1
169 $ hg perfbdiff --alldata 1
152 $ hg perfunidiff -c 1
170 $ hg perfunidiff -c 1
153 $ hg perfunidiff --alldata 1
171 $ hg perfunidiff --alldata 1
154 $ hg perfbookmarks
172 $ hg perfbookmarks
155 $ hg perfbranchmap
173 $ hg perfbranchmap
156 $ hg perfbranchmapload
174 $ hg perfbranchmapload
157 $ hg perfbranchmapupdate --base "not tip" --target "tip"
175 $ hg perfbranchmapupdate --base "not tip" --target "tip"
158 benchmark of branchmap with 3 revisions with 1 new ones
176 benchmark of branchmap with 3 revisions with 1 new ones
159 $ hg perfcca
177 $ hg perfcca
160 $ hg perfchangegroupchangelog
178 $ hg perfchangegroupchangelog
161 $ hg perfchangegroupchangelog --cgversion 01
179 $ hg perfchangegroupchangelog --cgversion 01
162 $ hg perfchangeset 2
180 $ hg perfchangeset 2
163 $ hg perfctxfiles 2
181 $ hg perfctxfiles 2
164 $ hg perfdiffwd
182 $ hg perfdiffwd
165 $ hg perfdirfoldmap
183 $ hg perfdirfoldmap
166 $ hg perfdirs
184 $ hg perfdirs
167 $ hg perfdirstate
185 $ hg perfdirstate
168 $ hg perfdirstatedirs
186 $ hg perfdirstatedirs
169 $ hg perfdirstatefoldmap
187 $ hg perfdirstatefoldmap
170 $ hg perfdirstatewrite
188 $ hg perfdirstatewrite
171 #if repofncache
189 #if repofncache
172 $ hg perffncacheencode
190 $ hg perffncacheencode
173 $ hg perffncacheload
191 $ hg perffncacheload
174 $ hg debugrebuildfncache
192 $ hg debugrebuildfncache
175 fncache already up to date
193 fncache already up to date
176 $ hg perffncachewrite
194 $ hg perffncachewrite
177 $ hg debugrebuildfncache
195 $ hg debugrebuildfncache
178 fncache already up to date
196 fncache already up to date
179 #endif
197 #endif
180 $ hg perfheads
198 $ hg perfheads
181 $ hg perfignore
199 $ hg perfignore
182 $ hg perfindex
200 $ hg perfindex
183 $ hg perflinelogedits -n 1
201 $ hg perflinelogedits -n 1
184 $ hg perfloadmarkers
202 $ hg perfloadmarkers
185 $ hg perflog
203 $ hg perflog
186 $ hg perflookup 2
204 $ hg perflookup 2
187 $ hg perflrucache
205 $ hg perflrucache
188 $ hg perfmanifest 2
206 $ hg perfmanifest 2
189 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
207 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
190 $ hg perfmanifest -m 44fe2c8352bb
208 $ hg perfmanifest -m 44fe2c8352bb
191 abort: manifest revision must be integer or full node
209 abort: manifest revision must be integer or full node
192 [255]
210 [255]
193 $ hg perfmergecalculate -r 3
211 $ hg perfmergecalculate -r 3
194 $ hg perfmoonwalk
212 $ hg perfmoonwalk
195 $ hg perfnodelookup 2
213 $ hg perfnodelookup 2
196 $ hg perfpathcopies 1 2
214 $ hg perfpathcopies 1 2
197 $ hg perfprogress --total 1000
215 $ hg perfprogress --total 1000
198 $ hg perfrawfiles 2
216 $ hg perfrawfiles 2
199 $ hg perfrevlogindex -c
217 $ hg perfrevlogindex -c
200 #if reporevlogstore
218 #if reporevlogstore
201 $ hg perfrevlogrevisions .hg/store/data/a.i
219 $ hg perfrevlogrevisions .hg/store/data/a.i
202 #endif
220 #endif
203 $ hg perfrevlogrevision -m 0
221 $ hg perfrevlogrevision -m 0
204 $ hg perfrevlogchunks -c
222 $ hg perfrevlogchunks -c
205 $ hg perfrevrange
223 $ hg perfrevrange
206 $ hg perfrevset 'all()'
224 $ hg perfrevset 'all()'
207 $ hg perfstartup
225 $ hg perfstartup
208 $ hg perfstatus
226 $ hg perfstatus
209 $ hg perftags
227 $ hg perftags
210 $ hg perftemplating
228 $ hg perftemplating
211 $ hg perfvolatilesets
229 $ hg perfvolatilesets
212 $ hg perfwalk
230 $ hg perfwalk
213 $ hg perfparents
231 $ hg perfparents
214 $ hg perfdiscovery -q .
232 $ hg perfdiscovery -q .
215
233
216 test actual output
234 test actual output
217 ------------------
235 ------------------
218
236
219 normal output:
237 normal output:
220
238
221 $ hg perfheads --config perf.stub=no
239 $ hg perfheads --config perf.stub=no
222 ! wall * comb * user * sys * (best of *) (glob)
240 ! wall * comb * user * sys * (best of *) (glob)
223
241
224 detailed output:
242 detailed output:
225
243
226 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
244 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
227 ! wall * comb * user * sys * (best of *) (glob)
245 ! wall * comb * user * sys * (best of *) (glob)
228 ! wall * comb * user * sys * (max of *) (glob)
246 ! wall * comb * user * sys * (max of *) (glob)
229 ! wall * comb * user * sys * (avg of *) (glob)
247 ! wall * comb * user * sys * (avg of *) (glob)
230 ! wall * comb * user * sys * (median of *) (glob)
248 ! wall * comb * user * sys * (median of *) (glob)
231
249
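The extra lines reported with perf.all-timing are simple aggregates over the repeated runs. A small illustration of the arithmetic the labels refer to (not the extension's actual aggregation code), assuming a list of measured wall times:

    timings = [0.012, 0.010, 0.011, 0.015]        # hypothetical wall times
    best = min(timings)                           # "best of" line
    worst = max(timings)                          # "max of" line
    avg = sum(timings) / len(timings)             # "avg of" line
    median = sorted(timings)[len(timings) // 2]   # "median of" line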
232 test json output
250 test json output
233 ----------------
251 ----------------
234
252
235 normal output:
253 normal output:
236
254
237 $ hg perfheads --template json --config perf.stub=no
255 $ hg perfheads --template json --config perf.stub=no
238 [
256 [
239 {
257 {
240 "comb": *, (glob)
258 "comb": *, (glob)
241 "count": *, (glob)
259 "count": *, (glob)
242 "sys": *, (glob)
260 "sys": *, (glob)
243 "user": *, (glob)
261 "user": *, (glob)
244 "wall": * (glob)
262 "wall": * (glob)
245 }
263 }
246 ]
264 ]
247
265
248 detailed output:
266 detailed output:
249
267
250 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
268 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
251 [
269 [
252 {
270 {
253 "avg.comb": *, (glob)
271 "avg.comb": *, (glob)
254 "avg.count": *, (glob)
272 "avg.count": *, (glob)
255 "avg.sys": *, (glob)
273 "avg.sys": *, (glob)
256 "avg.user": *, (glob)
274 "avg.user": *, (glob)
257 "avg.wall": *, (glob)
275 "avg.wall": *, (glob)
258 "comb": *, (glob)
276 "comb": *, (glob)
259 "count": *, (glob)
277 "count": *, (glob)
260 "max.comb": *, (glob)
278 "max.comb": *, (glob)
261 "max.count": *, (glob)
279 "max.count": *, (glob)
262 "max.sys": *, (glob)
280 "max.sys": *, (glob)
263 "max.user": *, (glob)
281 "max.user": *, (glob)
264 "max.wall": *, (glob)
282 "max.wall": *, (glob)
265 "median.comb": *, (glob)
283 "median.comb": *, (glob)
266 "median.count": *, (glob)
284 "median.count": *, (glob)
267 "median.sys": *, (glob)
285 "median.sys": *, (glob)
268 "median.user": *, (glob)
286 "median.user": *, (glob)
269 "median.wall": *, (glob)
287 "median.wall": *, (glob)
270 "sys": *, (glob)
288 "sys": *, (glob)
271 "user": *, (glob)
289 "user": *, (glob)
272 "wall": * (glob)
290 "wall": * (glob)
273 }
291 }
274 ]
292 ]
275
293
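Because --template json emits a machine-readable list, the results are easy to post-process. A minimal sketch, assuming the output was first saved with "hg perfheads --template json > perf.json" (the file name is arbitrary):

    import json

    with open('perf.json') as fh:
        results = json.load(fh)
    for entry in results:
        # 'count' and 'wall' are among the keys shown in the expected output above
        print('%d runs, best wall time %.6f s' % (entry['count'], entry['wall']))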
276 Check perf.py for historical portability
294 Check perf.py for historical portability
277 ----------------------------------------
295 ----------------------------------------
278
296
279 $ cd "$TESTDIR/.."
297 $ cd "$TESTDIR/.."
280
298
281 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
299 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
282 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
300 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
283 > "$TESTDIR"/check-perf-code.py contrib/perf.py
301 > "$TESTDIR"/check-perf-code.py contrib/perf.py
284 contrib/perf.py:\d+: (re)
302 contrib/perf.py:\d+: (re)
285 > from mercurial import (
303 > from mercurial import (
286 import newer module separately in try clause for early Mercurial
304 import newer module separately in try clause for early Mercurial
287 contrib/perf.py:\d+: (re)
305 contrib/perf.py:\d+: (re)
288 > from mercurial import (
306 > from mercurial import (
289 import newer module separately in try clause for early Mercurial
307 import newer module separately in try clause for early Mercurial
290 contrib/perf.py:\d+: (re)
308 contrib/perf.py:\d+: (re)
291 > origindexpath = orig.opener.join(orig.indexfile)
309 > origindexpath = orig.opener.join(orig.indexfile)
292 use getvfs()/getsvfs() for early Mercurial
310 use getvfs()/getsvfs() for early Mercurial
293 contrib/perf.py:\d+: (re)
311 contrib/perf.py:\d+: (re)
294 > origdatapath = orig.opener.join(orig.datafile)
312 > origdatapath = orig.opener.join(orig.datafile)
295 use getvfs()/getsvfs() for early Mercurial
313 use getvfs()/getsvfs() for early Mercurial
296 contrib/perf.py:\d+: (re)
314 contrib/perf.py:\d+: (re)
297 > vfs = vfsmod.vfs(tmpdir)
315 > vfs = vfsmod.vfs(tmpdir)
298 use getvfs()/getsvfs() for early Mercurial
316 use getvfs()/getsvfs() for early Mercurial
299 contrib/perf.py:\d+: (re)
317 contrib/perf.py:\d+: (re)
300 > vfs.options = getattr(orig.opener, 'options', None)
318 > vfs.options = getattr(orig.opener, 'options', None)
301 use getvfs()/getsvfs() for early Mercurial
319 use getvfs()/getsvfs() for early Mercurial
302 [1]
320 [1]