perf: document perfparents
marmoute
r42183:b900b392 default

# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``stub``
  When set, benchmarks will only be run once; useful for testing
  (default: off)
'''
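
# Example [perf] configuration enabling the options documented above; the
# values are illustrative, not defaults taken from this extension:
#
#   [perf]
#   all-timing = yes
#   presleep = 0
#   stub = no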

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide a range of Mercurial versions as
#   possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf commands work correctly with as wide a range of
#   Mercurial versions as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf commands for historical features work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf commands for recent features work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
except ImportError:
    pass


def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm

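# A minimal usage sketch for gettimer(), mirroring the perf* commands defined
# below (the benchmarked call is illustrative; substitute whatever needs to
# be measured):
#
#   timer, fm = gettimer(ui, opts)
#   timer(lambda: len(repo.heads()))
#   fm.end()
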
def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))

def _timer(fm, func, setup=None, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)

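# Each call to display() above emits one line per statistic; with
# perf.all-timing enabled the output looks roughly like this (numbers are
# purely illustrative):
#
#   ! wall 0.003456 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)
#   ! wall 0.012345 comb 0.020000 user 0.010000 sys 0.010000 (max of 100)
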
# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has the 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute
    at runtime. This avoids overlooking a future removal of the attribute,
    which would break the assumptions of the performance measurement.

    This function returns an object that can (1) assign a new value to
    the attribute and (2) restore its original value.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful for examining
    an attribute which isn't guaranteed to exist in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()

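# Sketch of the safeattrsetter() pattern (the same one gettimer() uses above
# for ui.fout; 'ui' stands in for any object carrying the attribute):
#
#   fout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if fout:
#       fout.set(ui.ferr)    # temporarily redirect
#       ...
#       fout.restore()       # put the original value back
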
# utilities to examine internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()

@command(b'perftags', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()

@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()

@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
886
886
887 @command(b'perfpathcopies', [], b"REV REV")
887 @command(b'perfpathcopies', [], b"REV REV")
888 def perfpathcopies(ui, repo, rev1, rev2, **opts):
888 def perfpathcopies(ui, repo, rev1, rev2, **opts):
889 """benchmark the copy tracing logic"""
889 """benchmark the copy tracing logic"""
890 opts = _byteskwargs(opts)
890 opts = _byteskwargs(opts)
891 timer, fm = gettimer(ui, opts)
891 timer, fm = gettimer(ui, opts)
892 ctx1 = scmutil.revsingle(repo, rev1, rev1)
892 ctx1 = scmutil.revsingle(repo, rev1, rev1)
893 ctx2 = scmutil.revsingle(repo, rev2, rev2)
893 ctx2 = scmutil.revsingle(repo, rev2, rev2)
894 def d():
894 def d():
895 copies.pathcopies(ctx1, ctx2)
895 copies.pathcopies(ctx1, ctx2)
896 timer(d)
896 timer(d)
897 fm.end()
897 fm.end()
898
898
899 @command(b'perfphases',
899 @command(b'perfphases',
900 [(b'', b'full', False, b'include file reading time too'),
900 [(b'', b'full', False, b'include file reading time too'),
901 ], b"")
901 ], b"")
902 def perfphases(ui, repo, **opts):
902 def perfphases(ui, repo, **opts):
903 """benchmark phasesets computation"""
903 """benchmark phasesets computation"""
904 opts = _byteskwargs(opts)
904 opts = _byteskwargs(opts)
905 timer, fm = gettimer(ui, opts)
905 timer, fm = gettimer(ui, opts)
906 _phases = repo._phasecache
906 _phases = repo._phasecache
907 full = opts.get(b'full')
907 full = opts.get(b'full')
908 def d():
908 def d():
909 phases = _phases
909 phases = _phases
910 if full:
910 if full:
911 clearfilecache(repo, b'_phasecache')
911 clearfilecache(repo, b'_phasecache')
912 phases = repo._phasecache
912 phases = repo._phasecache
913 phases.invalidate()
913 phases.invalidate()
914 phases.loadphaserevs(repo)
914 phases.loadphaserevs(repo)
915 timer(d)
915 timer(d)
916 fm.end()
916 fm.end()
917
917
918 @command(b'perfphasesremote',
918 @command(b'perfphasesremote',
919 [], b"[DEST]")
919 [], b"[DEST]")
920 def perfphasesremote(ui, repo, dest=None, **opts):
920 def perfphasesremote(ui, repo, dest=None, **opts):
921 """benchmark time needed to analyse phases of the remote server"""
921 """benchmark time needed to analyse phases of the remote server"""
922 from mercurial.node import (
922 from mercurial.node import (
923 bin,
923 bin,
924 )
924 )
925 from mercurial import (
925 from mercurial import (
926 exchange,
926 exchange,
927 hg,
927 hg,
928 phases,
928 phases,
929 )
929 )
930 opts = _byteskwargs(opts)
930 opts = _byteskwargs(opts)
931 timer, fm = gettimer(ui, opts)
931 timer, fm = gettimer(ui, opts)
932
932
933 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
933 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
934 if not path:
934 if not path:
935 raise error.Abort((b'default repository not configured!'),
935 raise error.Abort((b'default repository not configured!'),
936 hint=(b"see 'hg help config.paths'"))
936 hint=(b"see 'hg help config.paths'"))
937 dest = path.pushloc or path.loc
937 dest = path.pushloc or path.loc
938 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
938 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
939 other = hg.peer(repo, opts, dest)
939 other = hg.peer(repo, opts, dest)
940
940
941 # easier to perform discovery through the operation
941 # easier to perform discovery through the operation
942 op = exchange.pushoperation(repo, other)
942 op = exchange.pushoperation(repo, other)
943 exchange._pushdiscoverychangeset(op)
943 exchange._pushdiscoverychangeset(op)
944
944
945 remotesubset = op.fallbackheads
945 remotesubset = op.fallbackheads
946
946
947 with other.commandexecutor() as e:
947 with other.commandexecutor() as e:
948 remotephases = e.callcommand(b'listkeys',
948 remotephases = e.callcommand(b'listkeys',
949 {b'namespace': b'phases'}).result()
949 {b'namespace': b'phases'}).result()
950 del other
950 del other
951 publishing = remotephases.get(b'publishing', False)
951 publishing = remotephases.get(b'publishing', False)
952 if publishing:
952 if publishing:
953 ui.status((b'publishing: yes\n'))
953 ui.status((b'publishing: yes\n'))
954 else:
954 else:
955 ui.status((b'publishing: no\n'))
955 ui.status((b'publishing: no\n'))
956
956
957 nodemap = repo.changelog.nodemap
957 nodemap = repo.changelog.nodemap
958 nonpublishroots = 0
958 nonpublishroots = 0
959 for nhex, phase in remotephases.iteritems():
959 for nhex, phase in remotephases.iteritems():
960 if nhex == b'publishing': # ignore data related to publish option
960 if nhex == b'publishing': # ignore data related to publish option
961 continue
961 continue
962 node = bin(nhex)
962 node = bin(nhex)
963 if node in nodemap and int(phase):
963 if node in nodemap and int(phase):
964 nonpublishroots += 1
964 nonpublishroots += 1
965 ui.status((b'number of roots: %d\n') % len(remotephases))
965 ui.status((b'number of roots: %d\n') % len(remotephases))
966 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
966 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
967 def d():
967 def d():
968 phases.remotephasessummary(repo,
968 phases.remotephasessummary(repo,
969 remotesubset,
969 remotesubset,
970 remotephases)
970 remotephases)
971 timer(d)
971 timer(d)
972 fm.end()
972 fm.end()
973
973
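# Usage sketch (hypothetical invocations, assuming the perf extension is
# enabled): with no argument the `default-push`/`default` path is used, as
# resolved above; any configured path name could be passed instead.
#
#   $ hg perfphasesremote
#   $ hg perfphasesremote my-remote    # 'my-remote' is a placeholder path name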
974 @command(b'perfmanifest',[
974 @command(b'perfmanifest',[
975 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
975 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
976 (b'', b'clear-disk', False, b'clear on-disk caches too'),
976 (b'', b'clear-disk', False, b'clear on-disk caches too'),
977 ] + formatteropts, b'REV|NODE')
977 ] + formatteropts, b'REV|NODE')
978 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
978 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
979 """benchmark the time to read a manifest from disk and return a usable
979 """benchmark the time to read a manifest from disk and return a usable
980 dict-like object
980 dict-like object
981
981
982 Manifest caches are cleared before retrieval."""
982 Manifest caches are cleared before retrieval."""
983 opts = _byteskwargs(opts)
983 opts = _byteskwargs(opts)
984 timer, fm = gettimer(ui, opts)
984 timer, fm = gettimer(ui, opts)
985 if not manifest_rev:
985 if not manifest_rev:
986 ctx = scmutil.revsingle(repo, rev, rev)
986 ctx = scmutil.revsingle(repo, rev, rev)
987 t = ctx.manifestnode()
987 t = ctx.manifestnode()
988 else:
988 else:
989 from mercurial.node import bin
989 from mercurial.node import bin
990
990
991 if len(rev) == 40:
991 if len(rev) == 40:
992 t = bin(rev)
992 t = bin(rev)
993 else:
993 else:
994 try:
994 try:
995 rev = int(rev)
995 rev = int(rev)
996
996
997 if util.safehasattr(repo.manifestlog, b'getstorage'):
997 if util.safehasattr(repo.manifestlog, b'getstorage'):
998 t = repo.manifestlog.getstorage(b'').node(rev)
998 t = repo.manifestlog.getstorage(b'').node(rev)
999 else:
999 else:
1000 t = repo.manifestlog._revlog.lookup(rev)
1000 t = repo.manifestlog._revlog.lookup(rev)
1001 except ValueError:
1001 except ValueError:
1002 raise error.Abort(b'manifest revision must be integer or full '
1002 raise error.Abort(b'manifest revision must be integer or full '
1003 b'node')
1003 b'node')
1004 def d():
1004 def d():
1005 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1005 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1006 repo.manifestlog[t].read()
1006 repo.manifestlog[t].read()
1007 timer(d)
1007 timer(d)
1008 fm.end()
1008 fm.end()
1009
1009
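# Usage sketch (hypothetical invocations; revision values are illustrative):
# the argument is normally a changeset revision, while -m/--manifest-rev
# interprets it as a manifest revision or full manifest node, as handled above.
#
#   $ hg perfmanifest tip
#   $ hg perfmanifest --clear-disk tip
#   $ hg perfmanifest -m 0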
1010 @command(b'perfchangeset', formatteropts)
1010 @command(b'perfchangeset', formatteropts)
1011 def perfchangeset(ui, repo, rev, **opts):
1011 def perfchangeset(ui, repo, rev, **opts):
1012 opts = _byteskwargs(opts)
1012 opts = _byteskwargs(opts)
1013 timer, fm = gettimer(ui, opts)
1013 timer, fm = gettimer(ui, opts)
1014 n = scmutil.revsingle(repo, rev).node()
1014 n = scmutil.revsingle(repo, rev).node()
1015 def d():
1015 def d():
1016 repo.changelog.read(n)
1016 repo.changelog.read(n)
1017 #repo.changelog._cache = None
1017 #repo.changelog._cache = None
1018 timer(d)
1018 timer(d)
1019 fm.end()
1019 fm.end()
1020
1020
1021 @command(b'perfignore', formatteropts)
1021 @command(b'perfignore', formatteropts)
1022 def perfignore(ui, repo, **opts):
1022 def perfignore(ui, repo, **opts):
1023 """benchmark operation related to computing ignore"""
1023 """benchmark operation related to computing ignore"""
1024 opts = _byteskwargs(opts)
1024 opts = _byteskwargs(opts)
1025 timer, fm = gettimer(ui, opts)
1025 timer, fm = gettimer(ui, opts)
1026 dirstate = repo.dirstate
1026 dirstate = repo.dirstate
1027
1027
1028 def setupone():
1028 def setupone():
1029 dirstate.invalidate()
1029 dirstate.invalidate()
1030 clearfilecache(dirstate, b'_ignore')
1030 clearfilecache(dirstate, b'_ignore')
1031
1031
1032 def runone():
1032 def runone():
1033 dirstate._ignore
1033 dirstate._ignore
1034
1034
1035 timer(runone, setup=setupone, title=b"load")
1035 timer(runone, setup=setupone, title=b"load")
1036 fm.end()
1036 fm.end()
1037
1037
1038 @command(b'perfindex', [
1038 @command(b'perfindex', [
1039 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1039 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1040 (b'', b'no-lookup', None, b'do not perform revision lookup after creation'),
1040 (b'', b'no-lookup', None, b'do not perform revision lookup after creation'),
1041 ] + formatteropts)
1041 ] + formatteropts)
1042 def perfindex(ui, repo, **opts):
1042 def perfindex(ui, repo, **opts):
1043 """benchmark index creation time followed by a lookup
1043 """benchmark index creation time followed by a lookup
1044
1044
1045 The default is to look `tip` up. Depending on the index implementation,
1045 The default is to look `tip` up. Depending on the index implementation,
1046 the revision looked up can matter. For example, an implementation
1046 the revision looked up can matter. For example, an implementation
1047 scanning the index will have a faster lookup time for `--rev tip` than for
1047 scanning the index will have a faster lookup time for `--rev tip` than for
1048 `--rev 0`. The number of looked up revisions and their order can also
1048 `--rev 0`. The number of looked up revisions and their order can also
1049 matter.
1049 matter.
1050
1050
1051 Examples of useful sets to test:
1051 Examples of useful sets to test:
1052 * tip
1052 * tip
1053 * 0
1053 * 0
1054 * -10:
1054 * -10:
1055 * :10
1055 * :10
1056 * -10: + :10
1056 * -10: + :10
1057 * :10: + -10:
1057 * :10: + -10:
1058 * -10000:
1058 * -10000:
1059 * -10000: + 0
1059 * -10000: + 0
1060
1060
1061 It is not currently possible to check for lookup of a missing node. For
1061 It is not currently possible to check for lookup of a missing node. For
1062 deeper lookup benchmarking, check out the `perfnodemap` command."""
1062 deeper lookup benchmarking, check out the `perfnodemap` command."""
1063 import mercurial.revlog
1063 import mercurial.revlog
1064 opts = _byteskwargs(opts)
1064 opts = _byteskwargs(opts)
1065 timer, fm = gettimer(ui, opts)
1065 timer, fm = gettimer(ui, opts)
1066 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1066 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1067 if opts[b'no_lookup']:
1067 if opts[b'no_lookup']:
1068 if opts['rev']:
1068 if opts['rev']:
1069 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1069 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1070 nodes = []
1070 nodes = []
1071 elif not opts[b'rev']:
1071 elif not opts[b'rev']:
1072 nodes = [repo[b"tip"].node()]
1072 nodes = [repo[b"tip"].node()]
1073 else:
1073 else:
1074 revs = scmutil.revrange(repo, opts[b'rev'])
1074 revs = scmutil.revrange(repo, opts[b'rev'])
1075 cl = repo.changelog
1075 cl = repo.changelog
1076 nodes = [cl.node(r) for r in revs]
1076 nodes = [cl.node(r) for r in revs]
1077
1077
1078 unfi = repo.unfiltered()
1078 unfi = repo.unfiltered()
1079 # find the filecache func directly
1079 # find the filecache func directly
1080 # This avoids polluting the benchmark with the filecache logic
1080 # This avoids polluting the benchmark with the filecache logic
1081 makecl = unfi.__class__.changelog.func
1081 makecl = unfi.__class__.changelog.func
1082 def setup():
1082 def setup():
1083 # probably not necessary, but for good measure
1083 # probably not necessary, but for good measure
1084 clearchangelog(unfi)
1084 clearchangelog(unfi)
1085 def d():
1085 def d():
1086 cl = makecl(unfi)
1086 cl = makecl(unfi)
1087 for n in nodes:
1087 for n in nodes:
1088 cl.rev(n)
1088 cl.rev(n)
1089 timer(d, setup=setup)
1089 timer(d, setup=setup)
1090 fm.end()
1090 fm.end()
1091
1091
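# Usage sketch (hypothetical invocations) based on the revision sets suggested
# in the docstring above; --rev may be repeated to combine sets:
#
#   $ hg perfindex
#   $ hg perfindex --rev 0
#   $ hg perfindex --rev '-10000:' --rev 0
#   $ hg perfindex --no-lookup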
1092 @command(b'perfnodemap', [
1092 @command(b'perfnodemap', [
1093 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1093 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1094 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1094 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1095 ] + formatteropts)
1095 ] + formatteropts)
1096 def perfnodemap(ui, repo, **opts):
1096 def perfnodemap(ui, repo, **opts):
1097 """benchmark the time necessary to look up revision from a cold nodemap
1097 """benchmark the time necessary to look up revision from a cold nodemap
1098
1098
1099 Depending on the implementation, the amount and order of revisions we look
1099 Depending on the implementation, the amount and order of revisions we look
1100 up can vary. Examples of useful sets to test:
1100 up can vary. Examples of useful sets to test:
1101 * tip
1101 * tip
1102 * 0
1102 * 0
1103 * -10:
1103 * -10:
1104 * :10
1104 * :10
1105 * -10: + :10
1105 * -10: + :10
1106 * :10: + -10:
1106 * :10: + -10:
1107 * -10000:
1107 * -10000:
1108 * -10000: + 0
1108 * -10000: + 0
1109
1109
1110 The command currently focuses on valid binary lookup. Benchmarking for
1110 The command currently focuses on valid binary lookup. Benchmarking for
1111 hexlookup, prefix lookup and missing lookup would also be valuable.
1111 hexlookup, prefix lookup and missing lookup would also be valuable.
1112 """
1112 """
1113 import mercurial.revlog
1113 import mercurial.revlog
1114 opts = _byteskwargs(opts)
1114 opts = _byteskwargs(opts)
1115 timer, fm = gettimer(ui, opts)
1115 timer, fm = gettimer(ui, opts)
1116 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1116 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1117
1117
1118 unfi = repo.unfiltered()
1118 unfi = repo.unfiltered()
1119 clearcaches = opts['clear_caches']
1119 clearcaches = opts['clear_caches']
1120 # find the filecache func directly
1120 # find the filecache func directly
1121 # This avoids polluting the benchmark with the filecache logic
1121 # This avoids polluting the benchmark with the filecache logic
1122 makecl = unfi.__class__.changelog.func
1122 makecl = unfi.__class__.changelog.func
1123 if not opts[b'rev']:
1123 if not opts[b'rev']:
1124 raise error.Abort('use --rev to specify revisions to look up')
1124 raise error.Abort('use --rev to specify revisions to look up')
1125 revs = scmutil.revrange(repo, opts[b'rev'])
1125 revs = scmutil.revrange(repo, opts[b'rev'])
1126 cl = repo.changelog
1126 cl = repo.changelog
1127 nodes = [cl.node(r) for r in revs]
1127 nodes = [cl.node(r) for r in revs]
1128
1128
1129 # use a list to pass reference to a nodemap from one closure to the next
1129 # use a list to pass reference to a nodemap from one closure to the next
1130 nodeget = [None]
1130 nodeget = [None]
1131 def setnodeget():
1131 def setnodeget():
1132 # probably not necessary, but for good measure
1132 # probably not necessary, but for good measure
1133 clearchangelog(unfi)
1133 clearchangelog(unfi)
1134 nodeget[0] = makecl(unfi).nodemap.get
1134 nodeget[0] = makecl(unfi).nodemap.get
1135
1135
1136 def d():
1136 def d():
1137 get = nodeget[0]
1137 get = nodeget[0]
1138 for n in nodes:
1138 for n in nodes:
1139 get(n)
1139 get(n)
1140
1140
1141 setup = None
1141 setup = None
1142 if clearcaches:
1142 if clearcaches:
1143 def setup():
1143 def setup():
1144 setnodeget()
1144 setnodeget()
1145 else:
1145 else:
1146 setnodeget()
1146 setnodeget()
1147 d() # prewarm the data structure
1147 d() # prewarm the data structure
1148 timer(d, setup=setup)
1148 timer(d, setup=setup)
1149 fm.end()
1149 fm.end()
1150
1150
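# Usage sketch (hypothetical invocations): unlike perfindex, --rev is required
# here, and the revlog caches are cleared between calls by default:
#
#   $ hg perfnodemap --rev tip
#   $ hg perfnodemap --rev '-10000:' --rev 0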
1151 @command(b'perfstartup', formatteropts)
1151 @command(b'perfstartup', formatteropts)
1152 def perfstartup(ui, repo, **opts):
1152 def perfstartup(ui, repo, **opts):
1153 opts = _byteskwargs(opts)
1153 opts = _byteskwargs(opts)
1154 timer, fm = gettimer(ui, opts)
1154 timer, fm = gettimer(ui, opts)
1155 def d():
1155 def d():
1156 if os.name != r'nt':
1156 if os.name != r'nt':
1157 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1157 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1158 fsencode(sys.argv[0]))
1158 fsencode(sys.argv[0]))
1159 else:
1159 else:
1160 os.environ[r'HGRCPATH'] = r' '
1160 os.environ[r'HGRCPATH'] = r' '
1161 os.system(r"%s version -q > NUL" % sys.argv[0])
1161 os.system(r"%s version -q > NUL" % sys.argv[0])
1162 timer(d)
1162 timer(d)
1163 fm.end()
1163 fm.end()
1164
1164
1165 @command(b'perfparents', formatteropts)
1165 @command(b'perfparents', formatteropts)
1166 def perfparents(ui, repo, **opts):
1166 def perfparents(ui, repo, **opts):
1167 """benchmark the time necessary to fetch one changeset's parents.
1168
1169 The fetch is done using the `node identifier`, traversing all object layers
1170 from the repository object. The first N revisions will be used for this
1171 benchmark. N is controlled by the ``perf.parentscount`` config option
1172 (default: 1000).
1173 """
1167 opts = _byteskwargs(opts)
1174 opts = _byteskwargs(opts)
1168 timer, fm = gettimer(ui, opts)
1175 timer, fm = gettimer(ui, opts)
1169 # control the number of commits perfparents iterates over
1176 # control the number of commits perfparents iterates over
1170 # experimental config: perf.parentscount
1177 # experimental config: perf.parentscount
1171 count = getint(ui, b"perf", b"parentscount", 1000)
1178 count = getint(ui, b"perf", b"parentscount", 1000)
1172 if len(repo.changelog) < count:
1179 if len(repo.changelog) < count:
1173 raise error.Abort(b"repo needs %d commits for this test" % count)
1180 raise error.Abort(b"repo needs %d commits for this test" % count)
1174 repo = repo.unfiltered()
1181 repo = repo.unfiltered()
1175 nl = [repo.changelog.node(i) for i in _xrange(count)]
1182 nl = [repo.changelog.node(i) for i in _xrange(count)]
1176 def d():
1183 def d():
1177 for n in nl:
1184 for n in nl:
1178 repo.changelog.parents(n)
1185 repo.changelog.parents(n)
1179 timer(d)
1186 timer(d)
1180 fm.end()
1187 fm.end()
1181
1188
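# Usage sketch (hypothetical invocations): the default run assumes a repository
# with at least 1000 commits; the perf.parentscount option documented above can
# lower that requirement, e.g. via --config:
#
#   $ hg perfparents
#   $ hg perfparents --config perf.parentscount=100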
1182 @command(b'perfctxfiles', formatteropts)
1189 @command(b'perfctxfiles', formatteropts)
1183 def perfctxfiles(ui, repo, x, **opts):
1190 def perfctxfiles(ui, repo, x, **opts):
1184 opts = _byteskwargs(opts)
1191 opts = _byteskwargs(opts)
1185 x = int(x)
1192 x = int(x)
1186 timer, fm = gettimer(ui, opts)
1193 timer, fm = gettimer(ui, opts)
1187 def d():
1194 def d():
1188 len(repo[x].files())
1195 len(repo[x].files())
1189 timer(d)
1196 timer(d)
1190 fm.end()
1197 fm.end()
1191
1198
1192 @command(b'perfrawfiles', formatteropts)
1199 @command(b'perfrawfiles', formatteropts)
1193 def perfrawfiles(ui, repo, x, **opts):
1200 def perfrawfiles(ui, repo, x, **opts):
1194 opts = _byteskwargs(opts)
1201 opts = _byteskwargs(opts)
1195 x = int(x)
1202 x = int(x)
1196 timer, fm = gettimer(ui, opts)
1203 timer, fm = gettimer(ui, opts)
1197 cl = repo.changelog
1204 cl = repo.changelog
1198 def d():
1205 def d():
1199 len(cl.read(x)[3])
1206 len(cl.read(x)[3])
1200 timer(d)
1207 timer(d)
1201 fm.end()
1208 fm.end()
1202
1209
1203 @command(b'perflookup', formatteropts)
1210 @command(b'perflookup', formatteropts)
1204 def perflookup(ui, repo, rev, **opts):
1211 def perflookup(ui, repo, rev, **opts):
1205 opts = _byteskwargs(opts)
1212 opts = _byteskwargs(opts)
1206 timer, fm = gettimer(ui, opts)
1213 timer, fm = gettimer(ui, opts)
1207 timer(lambda: len(repo.lookup(rev)))
1214 timer(lambda: len(repo.lookup(rev)))
1208 fm.end()
1215 fm.end()
1209
1216
1210 @command(b'perflinelogedits',
1217 @command(b'perflinelogedits',
1211 [(b'n', b'edits', 10000, b'number of edits'),
1218 [(b'n', b'edits', 10000, b'number of edits'),
1212 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1219 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1213 ], norepo=True)
1220 ], norepo=True)
1214 def perflinelogedits(ui, **opts):
1221 def perflinelogedits(ui, **opts):
1215 from mercurial import linelog
1222 from mercurial import linelog
1216
1223
1217 opts = _byteskwargs(opts)
1224 opts = _byteskwargs(opts)
1218
1225
1219 edits = opts[b'edits']
1226 edits = opts[b'edits']
1220 maxhunklines = opts[b'max_hunk_lines']
1227 maxhunklines = opts[b'max_hunk_lines']
1221
1228
1222 maxb1 = 100000
1229 maxb1 = 100000
1223 random.seed(0)
1230 random.seed(0)
1224 randint = random.randint
1231 randint = random.randint
1225 currentlines = 0
1232 currentlines = 0
1226 arglist = []
1233 arglist = []
1227 for rev in _xrange(edits):
1234 for rev in _xrange(edits):
1228 a1 = randint(0, currentlines)
1235 a1 = randint(0, currentlines)
1229 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1236 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1230 b1 = randint(0, maxb1)
1237 b1 = randint(0, maxb1)
1231 b2 = randint(b1, b1 + maxhunklines)
1238 b2 = randint(b1, b1 + maxhunklines)
1232 currentlines += (b2 - b1) - (a2 - a1)
1239 currentlines += (b2 - b1) - (a2 - a1)
1233 arglist.append((rev, a1, a2, b1, b2))
1240 arglist.append((rev, a1, a2, b1, b2))
1234
1241
1235 def d():
1242 def d():
1236 ll = linelog.linelog()
1243 ll = linelog.linelog()
1237 for args in arglist:
1244 for args in arglist:
1238 ll.replacelines(*args)
1245 ll.replacelines(*args)
1239
1246
1240 timer, fm = gettimer(ui, opts)
1247 timer, fm = gettimer(ui, opts)
1241 timer(d)
1248 timer(d)
1242 fm.end()
1249 fm.end()
1243
1250
1244 @command(b'perfrevrange', formatteropts)
1251 @command(b'perfrevrange', formatteropts)
1245 def perfrevrange(ui, repo, *specs, **opts):
1252 def perfrevrange(ui, repo, *specs, **opts):
1246 opts = _byteskwargs(opts)
1253 opts = _byteskwargs(opts)
1247 timer, fm = gettimer(ui, opts)
1254 timer, fm = gettimer(ui, opts)
1248 revrange = scmutil.revrange
1255 revrange = scmutil.revrange
1249 timer(lambda: len(revrange(repo, specs)))
1256 timer(lambda: len(revrange(repo, specs)))
1250 fm.end()
1257 fm.end()
1251
1258
1252 @command(b'perfnodelookup', formatteropts)
1259 @command(b'perfnodelookup', formatteropts)
1253 def perfnodelookup(ui, repo, rev, **opts):
1260 def perfnodelookup(ui, repo, rev, **opts):
1254 opts = _byteskwargs(opts)
1261 opts = _byteskwargs(opts)
1255 timer, fm = gettimer(ui, opts)
1262 timer, fm = gettimer(ui, opts)
1256 import mercurial.revlog
1263 import mercurial.revlog
1257 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1264 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1258 n = scmutil.revsingle(repo, rev).node()
1265 n = scmutil.revsingle(repo, rev).node()
1259 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1266 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1260 def d():
1267 def d():
1261 cl.rev(n)
1268 cl.rev(n)
1262 clearcaches(cl)
1269 clearcaches(cl)
1263 timer(d)
1270 timer(d)
1264 fm.end()
1271 fm.end()
1265
1272
1266 @command(b'perflog',
1273 @command(b'perflog',
1267 [(b'', b'rename', False, b'ask log to follow renames')
1274 [(b'', b'rename', False, b'ask log to follow renames')
1268 ] + formatteropts)
1275 ] + formatteropts)
1269 def perflog(ui, repo, rev=None, **opts):
1276 def perflog(ui, repo, rev=None, **opts):
1270 opts = _byteskwargs(opts)
1277 opts = _byteskwargs(opts)
1271 if rev is None:
1278 if rev is None:
1272 rev=[]
1279 rev=[]
1273 timer, fm = gettimer(ui, opts)
1280 timer, fm = gettimer(ui, opts)
1274 ui.pushbuffer()
1281 ui.pushbuffer()
1275 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1282 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1276 copies=opts.get(b'rename')))
1283 copies=opts.get(b'rename')))
1277 ui.popbuffer()
1284 ui.popbuffer()
1278 fm.end()
1285 fm.end()
1279
1286
1280 @command(b'perfmoonwalk', formatteropts)
1287 @command(b'perfmoonwalk', formatteropts)
1281 def perfmoonwalk(ui, repo, **opts):
1288 def perfmoonwalk(ui, repo, **opts):
1282 """benchmark walking the changelog backwards
1289 """benchmark walking the changelog backwards
1283
1290
1284 This also loads the changelog data for each revision in the changelog.
1291 This also loads the changelog data for each revision in the changelog.
1285 """
1292 """
1286 opts = _byteskwargs(opts)
1293 opts = _byteskwargs(opts)
1287 timer, fm = gettimer(ui, opts)
1294 timer, fm = gettimer(ui, opts)
1288 def moonwalk():
1295 def moonwalk():
1289 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1296 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1290 ctx = repo[i]
1297 ctx = repo[i]
1291 ctx.branch() # read changelog data (in addition to the index)
1298 ctx.branch() # read changelog data (in addition to the index)
1292 timer(moonwalk)
1299 timer(moonwalk)
1293 fm.end()
1300 fm.end()
1294
1301
1295 @command(b'perftemplating',
1302 @command(b'perftemplating',
1296 [(b'r', b'rev', [], b'revisions to run the template on'),
1303 [(b'r', b'rev', [], b'revisions to run the template on'),
1297 ] + formatteropts)
1304 ] + formatteropts)
1298 def perftemplating(ui, repo, testedtemplate=None, **opts):
1305 def perftemplating(ui, repo, testedtemplate=None, **opts):
1299 """test the rendering time of a given template"""
1306 """test the rendering time of a given template"""
1300 if makelogtemplater is None:
1307 if makelogtemplater is None:
1301 raise error.Abort((b"perftemplating not available with this Mercurial"),
1308 raise error.Abort((b"perftemplating not available with this Mercurial"),
1302 hint=b"use 4.3 or later")
1309 hint=b"use 4.3 or later")
1303
1310
1304 opts = _byteskwargs(opts)
1311 opts = _byteskwargs(opts)
1305
1312
1306 nullui = ui.copy()
1313 nullui = ui.copy()
1307 nullui.fout = open(os.devnull, r'wb')
1314 nullui.fout = open(os.devnull, r'wb')
1308 nullui.disablepager()
1315 nullui.disablepager()
1309 revs = opts.get(b'rev')
1316 revs = opts.get(b'rev')
1310 if not revs:
1317 if not revs:
1311 revs = [b'all()']
1318 revs = [b'all()']
1312 revs = list(scmutil.revrange(repo, revs))
1319 revs = list(scmutil.revrange(repo, revs))
1313
1320
1314 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1321 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1315 b' {author|person}: {desc|firstline}\n')
1322 b' {author|person}: {desc|firstline}\n')
1316 if testedtemplate is None:
1323 if testedtemplate is None:
1317 testedtemplate = defaulttemplate
1324 testedtemplate = defaulttemplate
1318 displayer = makelogtemplater(nullui, repo, testedtemplate)
1325 displayer = makelogtemplater(nullui, repo, testedtemplate)
1319 def format():
1326 def format():
1320 for r in revs:
1327 for r in revs:
1321 ctx = repo[r]
1328 ctx = repo[r]
1322 displayer.show(ctx)
1329 displayer.show(ctx)
1323 displayer.flush(ctx)
1330 displayer.flush(ctx)
1324
1331
1325 timer, fm = gettimer(ui, opts)
1332 timer, fm = gettimer(ui, opts)
1326 timer(format)
1333 timer(format)
1327 fm.end()
1334 fm.end()
1328
1335
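# Usage sketch (hypothetical invocations): without -r the default template
# defined above is rendered over all(); -r restricts the benchmark to a
# revset (the revset below is only an example):
#
#   $ hg perftemplating
#   $ hg perftemplating -r 'last(all(), 1000)'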
1329 @command(b'perfhelper-pathcopies', formatteropts +
1336 @command(b'perfhelper-pathcopies', formatteropts +
1330 [
1337 [
1331 (b'r', b'revs', [], b'restrict search to these revisions'),
1338 (b'r', b'revs', [], b'restrict search to these revisions'),
1332 (b'', b'timing', False, b'provides extra data (costly)'),
1339 (b'', b'timing', False, b'provides extra data (costly)'),
1333 ])
1340 ])
1334 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1341 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1335 """find statistic about potential parameters for the `perftracecopies`
1342 """find statistic about potential parameters for the `perftracecopies`
1336
1343
1337 This command find source-destination pair relevant for copytracing testing.
1344 This command find source-destination pair relevant for copytracing testing.
1338 It report value for some of the parameters that impact copy tracing time.
1345 It report value for some of the parameters that impact copy tracing time.
1339
1346
1340 If `--timing` is set, rename detection is run and the associated timing
1347 If `--timing` is set, rename detection is run and the associated timing
1341 will be reported. The extra details come at the cost of a slower command
1348 will be reported. The extra details come at the cost of a slower command
1342 execution.
1349 execution.
1343
1350
1344 Since the rename detection is only run once, other factors might easily
1351 Since the rename detection is only run once, other factors might easily
1345 affect the precision of the timing. However, it should give a good
1352 affect the precision of the timing. However, it should give a good
1346 approximation of which revision pairs are very costly.
1353 approximation of which revision pairs are very costly.
1347 """
1354 """
1348 opts = _byteskwargs(opts)
1355 opts = _byteskwargs(opts)
1349 fm = ui.formatter(b'perf', opts)
1356 fm = ui.formatter(b'perf', opts)
1350 dotiming = opts[b'timing']
1357 dotiming = opts[b'timing']
1351
1358
1352 if dotiming:
1359 if dotiming:
1353 header = '%12s %12s %12s %12s %12s %12s\n'
1360 header = '%12s %12s %12s %12s %12s %12s\n'
1354 output = ("%(source)12s %(destination)12s "
1361 output = ("%(source)12s %(destination)12s "
1355 "%(nbrevs)12d %(nbmissingfiles)12d "
1362 "%(nbrevs)12d %(nbmissingfiles)12d "
1356 "%(nbrenamedfiles)12d %(time)18.5f\n")
1363 "%(nbrenamedfiles)12d %(time)18.5f\n")
1357 header_names = ("source", "destination", "nb-revs", "nb-files",
1364 header_names = ("source", "destination", "nb-revs", "nb-files",
1358 "nb-renames", "time")
1365 "nb-renames", "time")
1359 fm.plain(header % header_names)
1366 fm.plain(header % header_names)
1360 else:
1367 else:
1361 header = '%12s %12s %12s %12s\n'
1368 header = '%12s %12s %12s %12s\n'
1362 output = ("%(source)12s %(destination)12s "
1369 output = ("%(source)12s %(destination)12s "
1363 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1370 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1364 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1371 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1365
1372
1366 if not revs:
1373 if not revs:
1367 revs = ['all()']
1374 revs = ['all()']
1368 revs = scmutil.revrange(repo, revs)
1375 revs = scmutil.revrange(repo, revs)
1369
1376
1370 roi = repo.revs('merge() and %ld', revs)
1377 roi = repo.revs('merge() and %ld', revs)
1371 for r in roi:
1378 for r in roi:
1372 ctx = repo[r]
1379 ctx = repo[r]
1373 p1 = ctx.p1().rev()
1380 p1 = ctx.p1().rev()
1374 p2 = ctx.p2().rev()
1381 p2 = ctx.p2().rev()
1375 bases = repo.changelog._commonancestorsheads(p1, p2)
1382 bases = repo.changelog._commonancestorsheads(p1, p2)
1376 for p in (p1, p2):
1383 for p in (p1, p2):
1377 for b in bases:
1384 for b in bases:
1378 base = repo[b]
1385 base = repo[b]
1379 parent = repo[p]
1386 parent = repo[p]
1380 missing = copies._computeforwardmissing(base, parent)
1387 missing = copies._computeforwardmissing(base, parent)
1381 if not missing:
1388 if not missing:
1382 continue
1389 continue
1383 data = {
1390 data = {
1384 b'source': base.hex(),
1391 b'source': base.hex(),
1385 b'destination': parent.hex(),
1392 b'destination': parent.hex(),
1386 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1393 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1387 b'nbmissingfiles': len(missing),
1394 b'nbmissingfiles': len(missing),
1388 }
1395 }
1389 if dotiming:
1396 if dotiming:
1390 begin = util.timer()
1397 begin = util.timer()
1391 renames = copies.pathcopies(base, parent)
1398 renames = copies.pathcopies(base, parent)
1392 end = util.timer()
1399 end = util.timer()
1393 # not very stable timing since we did only one run
1400 # not very stable timing since we did only one run
1394 data['time'] = end - begin
1401 data['time'] = end - begin
1395 data['nbrenamedfiles'] = len(renames)
1402 data['nbrenamedfiles'] = len(renames)
1396 fm.startitem()
1403 fm.startitem()
1397 fm.data(**data)
1404 fm.data(**data)
1398 out = data.copy()
1405 out = data.copy()
1399 out['source'] = fm.hexfunc(base.node())
1406 out['source'] = fm.hexfunc(base.node())
1400 out['destination'] = fm.hexfunc(parent.node())
1407 out['destination'] = fm.hexfunc(parent.node())
1401 fm.plain(output % out)
1408 fm.plain(output % out)
1402
1409
1403 fm.end()
1410 fm.end()
1404
1411
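# Usage sketch (hypothetical invocations): the first form only gathers the
# cheap statistics; --timing adds the slower single-run rename-detection
# column described in the docstring above (the revset is illustrative):
#
#   $ hg perfhelper-pathcopies
#   $ hg perfhelper-pathcopies --timing -r 'last(all(), 5000)'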
1405 @command(b'perfcca', formatteropts)
1412 @command(b'perfcca', formatteropts)
1406 def perfcca(ui, repo, **opts):
1413 def perfcca(ui, repo, **opts):
1407 opts = _byteskwargs(opts)
1414 opts = _byteskwargs(opts)
1408 timer, fm = gettimer(ui, opts)
1415 timer, fm = gettimer(ui, opts)
1409 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1416 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1410 fm.end()
1417 fm.end()
1411
1418
1412 @command(b'perffncacheload', formatteropts)
1419 @command(b'perffncacheload', formatteropts)
1413 def perffncacheload(ui, repo, **opts):
1420 def perffncacheload(ui, repo, **opts):
1414 opts = _byteskwargs(opts)
1421 opts = _byteskwargs(opts)
1415 timer, fm = gettimer(ui, opts)
1422 timer, fm = gettimer(ui, opts)
1416 s = repo.store
1423 s = repo.store
1417 def d():
1424 def d():
1418 s.fncache._load()
1425 s.fncache._load()
1419 timer(d)
1426 timer(d)
1420 fm.end()
1427 fm.end()
1421
1428
1422 @command(b'perffncachewrite', formatteropts)
1429 @command(b'perffncachewrite', formatteropts)
1423 def perffncachewrite(ui, repo, **opts):
1430 def perffncachewrite(ui, repo, **opts):
1424 opts = _byteskwargs(opts)
1431 opts = _byteskwargs(opts)
1425 timer, fm = gettimer(ui, opts)
1432 timer, fm = gettimer(ui, opts)
1426 s = repo.store
1433 s = repo.store
1427 lock = repo.lock()
1434 lock = repo.lock()
1428 s.fncache._load()
1435 s.fncache._load()
1429 tr = repo.transaction(b'perffncachewrite')
1436 tr = repo.transaction(b'perffncachewrite')
1430 tr.addbackup(b'fncache')
1437 tr.addbackup(b'fncache')
1431 def d():
1438 def d():
1432 s.fncache._dirty = True
1439 s.fncache._dirty = True
1433 s.fncache.write(tr)
1440 s.fncache.write(tr)
1434 timer(d)
1441 timer(d)
1435 tr.close()
1442 tr.close()
1436 lock.release()
1443 lock.release()
1437 fm.end()
1444 fm.end()
1438
1445
1439 @command(b'perffncacheencode', formatteropts)
1446 @command(b'perffncacheencode', formatteropts)
1440 def perffncacheencode(ui, repo, **opts):
1447 def perffncacheencode(ui, repo, **opts):
1441 opts = _byteskwargs(opts)
1448 opts = _byteskwargs(opts)
1442 timer, fm = gettimer(ui, opts)
1449 timer, fm = gettimer(ui, opts)
1443 s = repo.store
1450 s = repo.store
1444 s.fncache._load()
1451 s.fncache._load()
1445 def d():
1452 def d():
1446 for p in s.fncache.entries:
1453 for p in s.fncache.entries:
1447 s.encode(p)
1454 s.encode(p)
1448 timer(d)
1455 timer(d)
1449 fm.end()
1456 fm.end()
1450
1457
1451 def _bdiffworker(q, blocks, xdiff, ready, done):
1458 def _bdiffworker(q, blocks, xdiff, ready, done):
1452 while not done.is_set():
1459 while not done.is_set():
1453 pair = q.get()
1460 pair = q.get()
1454 while pair is not None:
1461 while pair is not None:
1455 if xdiff:
1462 if xdiff:
1456 mdiff.bdiff.xdiffblocks(*pair)
1463 mdiff.bdiff.xdiffblocks(*pair)
1457 elif blocks:
1464 elif blocks:
1458 mdiff.bdiff.blocks(*pair)
1465 mdiff.bdiff.blocks(*pair)
1459 else:
1466 else:
1460 mdiff.textdiff(*pair)
1467 mdiff.textdiff(*pair)
1461 q.task_done()
1468 q.task_done()
1462 pair = q.get()
1469 pair = q.get()
1463 q.task_done() # for the None one
1470 q.task_done() # for the None one
1464 with ready:
1471 with ready:
1465 ready.wait()
1472 ready.wait()
1466
1473
1467 def _manifestrevision(repo, mnode):
1474 def _manifestrevision(repo, mnode):
1468 ml = repo.manifestlog
1475 ml = repo.manifestlog
1469
1476
1470 if util.safehasattr(ml, b'getstorage'):
1477 if util.safehasattr(ml, b'getstorage'):
1471 store = ml.getstorage(b'')
1478 store = ml.getstorage(b'')
1472 else:
1479 else:
1473 store = ml._revlog
1480 store = ml._revlog
1474
1481
1475 return store.revision(mnode)
1482 return store.revision(mnode)
1476
1483
1477 @command(b'perfbdiff', revlogopts + formatteropts + [
1484 @command(b'perfbdiff', revlogopts + formatteropts + [
1478 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1485 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1479 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1486 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1480 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1487 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1481 (b'', b'blocks', False, b'test computing diffs into blocks'),
1488 (b'', b'blocks', False, b'test computing diffs into blocks'),
1482 (b'', b'xdiff', False, b'use xdiff algorithm'),
1489 (b'', b'xdiff', False, b'use xdiff algorithm'),
1483 ],
1490 ],
1484
1491
1485 b'-c|-m|FILE REV')
1492 b'-c|-m|FILE REV')
1486 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1493 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1487 """benchmark a bdiff between revisions
1494 """benchmark a bdiff between revisions
1488
1495
1489 By default, benchmark a bdiff between its delta parent and itself.
1496 By default, benchmark a bdiff between its delta parent and itself.
1490
1497
1491 With ``--count``, benchmark bdiffs between delta parents and self for N
1498 With ``--count``, benchmark bdiffs between delta parents and self for N
1492 revisions starting at the specified revision.
1499 revisions starting at the specified revision.
1493
1500
1494 With ``--alldata``, assume the requested revision is a changeset and
1501 With ``--alldata``, assume the requested revision is a changeset and
1495 measure bdiffs for all changes related to that changeset (manifest
1502 measure bdiffs for all changes related to that changeset (manifest
1496 and filelogs).
1503 and filelogs).
1497 """
1504 """
1498 opts = _byteskwargs(opts)
1505 opts = _byteskwargs(opts)
1499
1506
1500 if opts[b'xdiff'] and not opts[b'blocks']:
1507 if opts[b'xdiff'] and not opts[b'blocks']:
1501 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1508 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1502
1509
1503 if opts[b'alldata']:
1510 if opts[b'alldata']:
1504 opts[b'changelog'] = True
1511 opts[b'changelog'] = True
1505
1512
1506 if opts.get(b'changelog') or opts.get(b'manifest'):
1513 if opts.get(b'changelog') or opts.get(b'manifest'):
1507 file_, rev = None, file_
1514 file_, rev = None, file_
1508 elif rev is None:
1515 elif rev is None:
1509 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1516 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1510
1517
1511 blocks = opts[b'blocks']
1518 blocks = opts[b'blocks']
1512 xdiff = opts[b'xdiff']
1519 xdiff = opts[b'xdiff']
1513 textpairs = []
1520 textpairs = []
1514
1521
1515 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1522 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1516
1523
1517 startrev = r.rev(r.lookup(rev))
1524 startrev = r.rev(r.lookup(rev))
1518 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1525 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1519 if opts[b'alldata']:
1526 if opts[b'alldata']:
1520 # Load revisions associated with changeset.
1527 # Load revisions associated with changeset.
1521 ctx = repo[rev]
1528 ctx = repo[rev]
1522 mtext = _manifestrevision(repo, ctx.manifestnode())
1529 mtext = _manifestrevision(repo, ctx.manifestnode())
1523 for pctx in ctx.parents():
1530 for pctx in ctx.parents():
1524 pman = _manifestrevision(repo, pctx.manifestnode())
1531 pman = _manifestrevision(repo, pctx.manifestnode())
1525 textpairs.append((pman, mtext))
1532 textpairs.append((pman, mtext))
1526
1533
1527 # Load filelog revisions by iterating manifest delta.
1534 # Load filelog revisions by iterating manifest delta.
1528 man = ctx.manifest()
1535 man = ctx.manifest()
1529 pman = ctx.p1().manifest()
1536 pman = ctx.p1().manifest()
1530 for filename, change in pman.diff(man).items():
1537 for filename, change in pman.diff(man).items():
1531 fctx = repo.file(filename)
1538 fctx = repo.file(filename)
1532 f1 = fctx.revision(change[0][0] or -1)
1539 f1 = fctx.revision(change[0][0] or -1)
1533 f2 = fctx.revision(change[1][0] or -1)
1540 f2 = fctx.revision(change[1][0] or -1)
1534 textpairs.append((f1, f2))
1541 textpairs.append((f1, f2))
1535 else:
1542 else:
1536 dp = r.deltaparent(rev)
1543 dp = r.deltaparent(rev)
1537 textpairs.append((r.revision(dp), r.revision(rev)))
1544 textpairs.append((r.revision(dp), r.revision(rev)))
1538
1545
1539 withthreads = threads > 0
1546 withthreads = threads > 0
1540 if not withthreads:
1547 if not withthreads:
1541 def d():
1548 def d():
1542 for pair in textpairs:
1549 for pair in textpairs:
1543 if xdiff:
1550 if xdiff:
1544 mdiff.bdiff.xdiffblocks(*pair)
1551 mdiff.bdiff.xdiffblocks(*pair)
1545 elif blocks:
1552 elif blocks:
1546 mdiff.bdiff.blocks(*pair)
1553 mdiff.bdiff.blocks(*pair)
1547 else:
1554 else:
1548 mdiff.textdiff(*pair)
1555 mdiff.textdiff(*pair)
1549 else:
1556 else:
1550 q = queue()
1557 q = queue()
1551 for i in _xrange(threads):
1558 for i in _xrange(threads):
1552 q.put(None)
1559 q.put(None)
1553 ready = threading.Condition()
1560 ready = threading.Condition()
1554 done = threading.Event()
1561 done = threading.Event()
1555 for i in _xrange(threads):
1562 for i in _xrange(threads):
1556 threading.Thread(target=_bdiffworker,
1563 threading.Thread(target=_bdiffworker,
1557 args=(q, blocks, xdiff, ready, done)).start()
1564 args=(q, blocks, xdiff, ready, done)).start()
1558 q.join()
1565 q.join()
1559 def d():
1566 def d():
1560 for pair in textpairs:
1567 for pair in textpairs:
1561 q.put(pair)
1568 q.put(pair)
1562 for i in _xrange(threads):
1569 for i in _xrange(threads):
1563 q.put(None)
1570 q.put(None)
1564 with ready:
1571 with ready:
1565 ready.notify_all()
1572 ready.notify_all()
1566 q.join()
1573 q.join()
1567 timer, fm = gettimer(ui, opts)
1574 timer, fm = gettimer(ui, opts)
1568 timer(d)
1575 timer(d)
1569 fm.end()
1576 fm.end()
1570
1577
1571 if withthreads:
1578 if withthreads:
1572 done.set()
1579 done.set()
1573 for i in _xrange(threads):
1580 for i in _xrange(threads):
1574 q.put(None)
1581 q.put(None)
1575 with ready:
1582 with ready:
1576 ready.notify_all()
1583 ready.notify_all()
1577
1584
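# Usage sketch (hypothetical invocations; revision values are illustrative):
# a single changelog bdiff, a batch of 50 starting at the given revision, and
# the xdiff variant, which requires --blocks as enforced above:
#
#   $ hg perfbdiff -c 4000
#   $ hg perfbdiff -c 4000 --count 50
#   $ hg perfbdiff --alldata 4000 --blocks --xdiff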
1578 @command(b'perfunidiff', revlogopts + formatteropts + [
1585 @command(b'perfunidiff', revlogopts + formatteropts + [
1579 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1586 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1580 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1587 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1581 ], b'-c|-m|FILE REV')
1588 ], b'-c|-m|FILE REV')
1582 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1589 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1583 """benchmark a unified diff between revisions
1590 """benchmark a unified diff between revisions
1584
1591
1585 This doesn't include any copy tracing - it's just a unified diff
1592 This doesn't include any copy tracing - it's just a unified diff
1586 of the texts.
1593 of the texts.
1587
1594
1588 By default, benchmark a diff between its delta parent and itself.
1595 By default, benchmark a diff between its delta parent and itself.
1589
1596
1590 With ``--count``, benchmark diffs between delta parents and self for N
1597 With ``--count``, benchmark diffs between delta parents and self for N
1591 revisions starting at the specified revision.
1598 revisions starting at the specified revision.
1592
1599
1593 With ``--alldata``, assume the requested revision is a changeset and
1600 With ``--alldata``, assume the requested revision is a changeset and
1594 measure diffs for all changes related to that changeset (manifest
1601 measure diffs for all changes related to that changeset (manifest
1595 and filelogs).
1602 and filelogs).
1596 """
1603 """
1597 opts = _byteskwargs(opts)
1604 opts = _byteskwargs(opts)
1598 if opts[b'alldata']:
1605 if opts[b'alldata']:
1599 opts[b'changelog'] = True
1606 opts[b'changelog'] = True
1600
1607
1601 if opts.get(b'changelog') or opts.get(b'manifest'):
1608 if opts.get(b'changelog') or opts.get(b'manifest'):
1602 file_, rev = None, file_
1609 file_, rev = None, file_
1603 elif rev is None:
1610 elif rev is None:
1604 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1611 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1605
1612
1606 textpairs = []
1613 textpairs = []
1607
1614
1608 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1615 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1609
1616
1610 startrev = r.rev(r.lookup(rev))
1617 startrev = r.rev(r.lookup(rev))
1611 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1618 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1612 if opts[b'alldata']:
1619 if opts[b'alldata']:
1613 # Load revisions associated with changeset.
1620 # Load revisions associated with changeset.
1614 ctx = repo[rev]
1621 ctx = repo[rev]
1615 mtext = _manifestrevision(repo, ctx.manifestnode())
1622 mtext = _manifestrevision(repo, ctx.manifestnode())
1616 for pctx in ctx.parents():
1623 for pctx in ctx.parents():
1617 pman = _manifestrevision(repo, pctx.manifestnode())
1624 pman = _manifestrevision(repo, pctx.manifestnode())
1618 textpairs.append((pman, mtext))
1625 textpairs.append((pman, mtext))
1619
1626
1620 # Load filelog revisions by iterating manifest delta.
1627 # Load filelog revisions by iterating manifest delta.
1621 man = ctx.manifest()
1628 man = ctx.manifest()
1622 pman = ctx.p1().manifest()
1629 pman = ctx.p1().manifest()
1623 for filename, change in pman.diff(man).items():
1630 for filename, change in pman.diff(man).items():
1624 fctx = repo.file(filename)
1631 fctx = repo.file(filename)
1625 f1 = fctx.revision(change[0][0] or -1)
1632 f1 = fctx.revision(change[0][0] or -1)
1626 f2 = fctx.revision(change[1][0] or -1)
1633 f2 = fctx.revision(change[1][0] or -1)
1627 textpairs.append((f1, f2))
1634 textpairs.append((f1, f2))
1628 else:
1635 else:
1629 dp = r.deltaparent(rev)
1636 dp = r.deltaparent(rev)
1630 textpairs.append((r.revision(dp), r.revision(rev)))
1637 textpairs.append((r.revision(dp), r.revision(rev)))
1631
1638
1632 def d():
1639 def d():
1633 for left, right in textpairs:
1640 for left, right in textpairs:
1634 # The date strings don't matter, so we pass empty strings.
1641 # The date strings don't matter, so we pass empty strings.
1635 headerlines, hunks = mdiff.unidiff(
1642 headerlines, hunks = mdiff.unidiff(
1636 left, b'', right, b'', b'left', b'right', binary=False)
1643 left, b'', right, b'', b'left', b'right', binary=False)
1637 # consume iterators in roughly the way patch.py does
1644 # consume iterators in roughly the way patch.py does
1638 b'\n'.join(headerlines)
1645 b'\n'.join(headerlines)
1639 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1646 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1640 timer, fm = gettimer(ui, opts)
1647 timer, fm = gettimer(ui, opts)
1641 timer(d)
1648 timer(d)
1642 fm.end()
1649 fm.end()
1643
1650
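# Usage sketch (hypothetical invocations, analogous to perfbdiff above;
# revision values are illustrative):
#
#   $ hg perfunidiff -c 4000
#   $ hg perfunidiff -m 4000 --count 50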
1644 @command(b'perfdiffwd', formatteropts)
1651 @command(b'perfdiffwd', formatteropts)
1645 def perfdiffwd(ui, repo, **opts):
1652 def perfdiffwd(ui, repo, **opts):
1646 """Profile diff of working directory changes"""
1653 """Profile diff of working directory changes"""
1647 opts = _byteskwargs(opts)
1654 opts = _byteskwargs(opts)
1648 timer, fm = gettimer(ui, opts)
1655 timer, fm = gettimer(ui, opts)
1649 options = {
1656 options = {
1650 'w': 'ignore_all_space',
1657 'w': 'ignore_all_space',
1651 'b': 'ignore_space_change',
1658 'b': 'ignore_space_change',
1652 'B': 'ignore_blank_lines',
1659 'B': 'ignore_blank_lines',
1653 }
1660 }
1654
1661
1655 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1662 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1656 opts = dict((options[c], b'1') for c in diffopt)
1663 opts = dict((options[c], b'1') for c in diffopt)
1657 def d():
1664 def d():
1658 ui.pushbuffer()
1665 ui.pushbuffer()
1659 commands.diff(ui, repo, **opts)
1666 commands.diff(ui, repo, **opts)
1660 ui.popbuffer()
1667 ui.popbuffer()
1661 diffopt = diffopt.encode('ascii')
1668 diffopt = diffopt.encode('ascii')
1662 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1669 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1663 timer(d, title=title)
1670 timer(d, title=title)
1664 fm.end()
1671 fm.end()
1665
1672
1666 @command(b'perfrevlogindex', revlogopts + formatteropts,
1673 @command(b'perfrevlogindex', revlogopts + formatteropts,
1667 b'-c|-m|FILE')
1674 b'-c|-m|FILE')
1668 def perfrevlogindex(ui, repo, file_=None, **opts):
1675 def perfrevlogindex(ui, repo, file_=None, **opts):
1669 """Benchmark operations against a revlog index.
1676 """Benchmark operations against a revlog index.
1670
1677
1671 This tests constructing a revlog instance, reading index data,
1678 This tests constructing a revlog instance, reading index data,
1672 parsing index data, and performing various operations related to
1679 parsing index data, and performing various operations related to
1673 index data.
1680 index data.
1674 """
1681 """
1675
1682
1676 opts = _byteskwargs(opts)
1683 opts = _byteskwargs(opts)
1677
1684
1678 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1685 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1679
1686
1680 opener = getattr(rl, 'opener') # trick linter
1687 opener = getattr(rl, 'opener') # trick linter
1681 indexfile = rl.indexfile
1688 indexfile = rl.indexfile
1682 data = opener.read(indexfile)
1689 data = opener.read(indexfile)
1683
1690
1684 header = struct.unpack(b'>I', data[0:4])[0]
1691 header = struct.unpack(b'>I', data[0:4])[0]
1685 version = header & 0xFFFF
1692 version = header & 0xFFFF
1686 if version == 1:
1693 if version == 1:
1687 revlogio = revlog.revlogio()
1694 revlogio = revlog.revlogio()
1688 inline = header & (1 << 16)
1695 inline = header & (1 << 16)
1689 else:
1696 else:
1690 raise error.Abort((b'unsupported revlog version: %d') % version)
1697 raise error.Abort((b'unsupported revlog version: %d') % version)
1691
1698
1692 rllen = len(rl)
1699 rllen = len(rl)
1693
1700
1694 node0 = rl.node(0)
1701 node0 = rl.node(0)
1695 node25 = rl.node(rllen // 4)
1702 node25 = rl.node(rllen // 4)
1696 node50 = rl.node(rllen // 2)
1703 node50 = rl.node(rllen // 2)
1697 node75 = rl.node(rllen // 4 * 3)
1704 node75 = rl.node(rllen // 4 * 3)
1698 node100 = rl.node(rllen - 1)
1705 node100 = rl.node(rllen - 1)
1699
1706
1700 allrevs = range(rllen)
1707 allrevs = range(rllen)
1701 allrevsrev = list(reversed(allrevs))
1708 allrevsrev = list(reversed(allrevs))
1702 allnodes = [rl.node(rev) for rev in range(rllen)]
1709 allnodes = [rl.node(rev) for rev in range(rllen)]
1703 allnodesrev = list(reversed(allnodes))
1710 allnodesrev = list(reversed(allnodes))
1704
1711
1705 def constructor():
1712 def constructor():
1706 revlog.revlog(opener, indexfile)
1713 revlog.revlog(opener, indexfile)
1707
1714
1708 def read():
1715 def read():
1709 with opener(indexfile) as fh:
1716 with opener(indexfile) as fh:
1710 fh.read()
1717 fh.read()
1711
1718
1712 def parseindex():
1719 def parseindex():
1713 revlogio.parseindex(data, inline)
1720 revlogio.parseindex(data, inline)
1714
1721
1715 def getentry(revornode):
1722 def getentry(revornode):
1716 index = revlogio.parseindex(data, inline)[0]
1723 index = revlogio.parseindex(data, inline)[0]
1717 index[revornode]
1724 index[revornode]
1718
1725
1719 def getentries(revs, count=1):
1726 def getentries(revs, count=1):
1720 index = revlogio.parseindex(data, inline)[0]
1727 index = revlogio.parseindex(data, inline)[0]
1721
1728
1722 for i in range(count):
1729 for i in range(count):
1723 for rev in revs:
1730 for rev in revs:
1724 index[rev]
1731 index[rev]
1725
1732
1726 def resolvenode(node):
1733 def resolvenode(node):
1727 nodemap = revlogio.parseindex(data, inline)[1]
1734 nodemap = revlogio.parseindex(data, inline)[1]
1728 # This only works for the C code.
1735 # This only works for the C code.
1729 if nodemap is None:
1736 if nodemap is None:
1730 return
1737 return
1731
1738
1732 try:
1739 try:
1733 nodemap[node]
1740 nodemap[node]
1734 except error.RevlogError:
1741 except error.RevlogError:
1735 pass
1742 pass
1736
1743
1737 def resolvenodes(nodes, count=1):
1744 def resolvenodes(nodes, count=1):
1738 nodemap = revlogio.parseindex(data, inline)[1]
1745 nodemap = revlogio.parseindex(data, inline)[1]
1739 if nodemap is None:
1746 if nodemap is None:
1740 return
1747 return
1741
1748
1742 for i in range(count):
1749 for i in range(count):
1743 for node in nodes:
1750 for node in nodes:
1744 try:
1751 try:
1745 nodemap[node]
1752 nodemap[node]
1746 except error.RevlogError:
1753 except error.RevlogError:
1747 pass
1754 pass
1748
1755
1749 benches = [
1756 benches = [
1750 (constructor, b'revlog constructor'),
1757 (constructor, b'revlog constructor'),
1751 (read, b'read'),
1758 (read, b'read'),
1752 (parseindex, b'create index object'),
1759 (parseindex, b'create index object'),
1753 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1760 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1754 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1761 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1755 (lambda: resolvenode(node0), b'look up node at rev 0'),
1762 (lambda: resolvenode(node0), b'look up node at rev 0'),
1756 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1763 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1757 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1764 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1758 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1765 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1759 (lambda: resolvenode(node100), b'look up node at tip'),
1766 (lambda: resolvenode(node100), b'look up node at tip'),
1760 # 2x variation is to measure caching impact.
1767 # 2x variation is to measure caching impact.
1761 (lambda: resolvenodes(allnodes),
1768 (lambda: resolvenodes(allnodes),
1762 b'look up all nodes (forward)'),
1769 b'look up all nodes (forward)'),
1763 (lambda: resolvenodes(allnodes, 2),
1770 (lambda: resolvenodes(allnodes, 2),
1764 b'look up all nodes 2x (forward)'),
1771 b'look up all nodes 2x (forward)'),
1765 (lambda: resolvenodes(allnodesrev),
1772 (lambda: resolvenodes(allnodesrev),
1766 b'look up all nodes (reverse)'),
1773 b'look up all nodes (reverse)'),
1767 (lambda: resolvenodes(allnodesrev, 2),
1774 (lambda: resolvenodes(allnodesrev, 2),
1768 b'look up all nodes 2x (reverse)'),
1775 b'look up all nodes 2x (reverse)'),
1769 (lambda: getentries(allrevs),
1776 (lambda: getentries(allrevs),
1770 b'retrieve all index entries (forward)'),
1777 b'retrieve all index entries (forward)'),
1771 (lambda: getentries(allrevs, 2),
1778 (lambda: getentries(allrevs, 2),
1772 b'retrieve all index entries 2x (forward)'),
1779 b'retrieve all index entries 2x (forward)'),
1773 (lambda: getentries(allrevsrev),
1780 (lambda: getentries(allrevsrev),
1774 b'retrieve all index entries (reverse)'),
1781 b'retrieve all index entries (reverse)'),
1775 (lambda: getentries(allrevsrev, 2),
1782 (lambda: getentries(allrevsrev, 2),
1776 b'retrieve all index entries 2x (reverse)'),
1783 b'retrieve all index entries 2x (reverse)'),
1777 ]
1784 ]
1778
1785
1779 for fn, title in benches:
1786 for fn, title in benches:
1780 timer, fm = gettimer(ui, opts)
1787 timer, fm = gettimer(ui, opts)
1781 timer(fn, title=title)
1788 timer(fn, title=title)
1782 fm.end()
1789 fm.end()
1783
1790
1784 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1791 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1785 [(b'd', b'dist', 100, b'distance between the revisions'),
1792 [(b'd', b'dist', 100, b'distance between the revisions'),
1786 (b's', b'startrev', 0, b'revision to start reading at'),
1793 (b's', b'startrev', 0, b'revision to start reading at'),
1787 (b'', b'reverse', False, b'read in reverse')],
1794 (b'', b'reverse', False, b'read in reverse')],
1788 b'-c|-m|FILE')
1795 b'-c|-m|FILE')
1789 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1796 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1790 **opts):
1797 **opts):
1791 """Benchmark reading a series of revisions from a revlog.
1798 """Benchmark reading a series of revisions from a revlog.
1792
1799
1793 By default, we read every ``-d/--dist`` revision from 0 to tip of
1800 By default, we read every ``-d/--dist`` revision from 0 to tip of
1794 the specified revlog.
1801 the specified revlog.
1795
1802
1796 The start revision can be defined via ``-s/--startrev``.
1803 The start revision can be defined via ``-s/--startrev``.
1797 """
1804 """
1798 opts = _byteskwargs(opts)
1805 opts = _byteskwargs(opts)
1799
1806
1800 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1807 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1801 rllen = getlen(ui)(rl)
1808 rllen = getlen(ui)(rl)
1802
1809
1803 if startrev < 0:
1810 if startrev < 0:
1804 startrev = rllen + startrev
1811 startrev = rllen + startrev
1805
1812
1806 def d():
1813 def d():
1807 rl.clearcaches()
1814 rl.clearcaches()
1808
1815
1809 beginrev = startrev
1816 beginrev = startrev
1810 endrev = rllen
1817 endrev = rllen
1811 dist = opts[b'dist']
1818 dist = opts[b'dist']
1812
1819
1813 if reverse:
1820 if reverse:
1814 beginrev, endrev = endrev - 1, beginrev - 1
1821 beginrev, endrev = endrev - 1, beginrev - 1
1815 dist = -1 * dist
1822 dist = -1 * dist
1816
1823
1817 for x in _xrange(beginrev, endrev, dist):
1824 for x in _xrange(beginrev, endrev, dist):
1818 # Old revisions don't support passing int.
1825 # Old revisions don't support passing int.
1819 n = rl.node(x)
1826 n = rl.node(x)
1820 rl.revision(n)
1827 rl.revision(n)
1821
1828
1822 timer, fm = gettimer(ui, opts)
1829 timer, fm = gettimer(ui, opts)
1823 timer(d)
1830 timer(d)
1824 fm.end()
1831 fm.end()
1825
1832
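# Usage sketch (hypothetical invocations): read every 100th changelog revision
# forward, then every revision of a tracked file in reverse (the file name is
# a placeholder):
#
#   $ hg perfrevlogrevisions -c
#   $ hg perfrevlogrevisions --dist 1 --reverse some/tracked/file.txt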
1826 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1833 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1827 [(b's', b'startrev', 1000, b'revision to start writing at'),
1834 [(b's', b'startrev', 1000, b'revision to start writing at'),
1828 (b'', b'stoprev', -1, b'last revision to write'),
1835 (b'', b'stoprev', -1, b'last revision to write'),
1829 (b'', b'count', 3, b'last revision to write'),
1836 (b'', b'count', 3, b'last revision to write'),
1829 (b'', b'count', 3, b'number of passes to perform'),
1836 (b'', b'count', 3, b'number of passes to perform'),
1830 (b'', b'details', False, b'print timing for every revision tested'),
1837 (b'', b'details', False, b'print timing for every revision tested'),
1831 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1838 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
1833 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1840 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1834 ],
1841 ],
1835 b'-c|-m|FILE')
1842 b'-c|-m|FILE')
1836 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1843 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1837 """Benchmark writing a series of revisions to a revlog.
1844 """Benchmark writing a series of revisions to a revlog.
1838
1845
1839 Possible source values are:
1846 Possible source values are:
1840 * `full`: add from a full text (default).
1847 * `full`: add from a full text (default).
1841 * `parent-1`: add from a delta to the first parent
1848 * `parent-1`: add from a delta to the first parent
1842 * `parent-2`: add from a delta to the second parent if it exists
1849 * `parent-2`: add from a delta to the second parent if it exists
1843 (use a delta from the first parent otherwise)
1850 (use a delta from the first parent otherwise)
1844 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1851 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1845 * `storage`: add from the existing precomputed deltas
1852 * `storage`: add from the existing precomputed deltas
1846 """
1853 """
1847 opts = _byteskwargs(opts)
1854 opts = _byteskwargs(opts)
1848
1855
1849 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1856 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1850 rllen = getlen(ui)(rl)
1857 rllen = getlen(ui)(rl)
1851 if startrev < 0:
1858 if startrev < 0:
1852 startrev = rllen + startrev
1859 startrev = rllen + startrev
1853 if stoprev < 0:
1860 if stoprev < 0:
1854 stoprev = rllen + stoprev
1861 stoprev = rllen + stoprev
1855
1862
1856 lazydeltabase = opts['lazydeltabase']
1863 lazydeltabase = opts['lazydeltabase']
1857 source = opts['source']
1864 source = opts['source']
1858 clearcaches = opts['clear_caches']
1865 clearcaches = opts['clear_caches']
1859 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1866 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1860 b'storage')
1867 b'storage')
1861 if source not in validsource:
1868 if source not in validsource:
1862 raise error.Abort('invalid source type: %s' % source)
1869 raise error.Abort('invalid source type: %s' % source)
1863
1870
1864 ### actually gather results
1871 ### actually gather results
1865 count = opts['count']
1872 count = opts['count']
1866 if count <= 0:
1873 if count <= 0:
1867 raise error.Abort('invalid run count: %d' % count)
1874 raise error.Abort('invalid run count: %d' % count)
1868 allresults = []
1875 allresults = []
1869 for c in range(count):
1876 for c in range(count):
1870 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1877 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1871 lazydeltabase=lazydeltabase,
1878 lazydeltabase=lazydeltabase,
1872 clearcaches=clearcaches)
1879 clearcaches=clearcaches)
1873 allresults.append(timing)
1880 allresults.append(timing)
1874
1881
1875 ### consolidate the results in a single list
1882 ### consolidate the results in a single list
1876 results = []
1883 results = []
1877 for idx, (rev, t) in enumerate(allresults[0]):
1884 for idx, (rev, t) in enumerate(allresults[0]):
1878 ts = [t]
1885 ts = [t]
1879 for other in allresults[1:]:
1886 for other in allresults[1:]:
1880 orev, ot = other[idx]
1887 orev, ot = other[idx]
1881 assert orev == rev
1888 assert orev == rev
1882 ts.append(ot)
1889 ts.append(ot)
1883 results.append((rev, ts))
1890 results.append((rev, ts))
1884 resultcount = len(results)
1891 resultcount = len(results)
1885
1892
1886 ### Compute and display relevant statistics
1893 ### Compute and display relevant statistics
1887
1894
1888 # get a formatter
1895 # get a formatter
1889 fm = ui.formatter(b'perf', opts)
1896 fm = ui.formatter(b'perf', opts)
1890 displayall = ui.configbool(b"perf", b"all-timing", False)
1897 displayall = ui.configbool(b"perf", b"all-timing", False)
1891
1898
1892 # print individual details if requested
1899 # print individual details if requested
1893 if opts['details']:
1900 if opts['details']:
1894 for idx, item in enumerate(results, 1):
1901 for idx, item in enumerate(results, 1):
1895 rev, data = item
1902 rev, data = item
1896 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1903 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1897 formatone(fm, data, title=title, displayall=displayall)
1904 formatone(fm, data, title=title, displayall=displayall)
1898
1905
1899 # sorts results by median time
1906 # sorts results by median time
1900 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1907 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1901 # list of (name, index) to display
1908 # list of (name, index) to display
1902 relevants = [
1909 relevants = [
1903 ("min", 0),
1910 ("min", 0),
1904 ("10%", resultcount * 10 // 100),
1911 ("10%", resultcount * 10 // 100),
1905 ("25%", resultcount * 25 // 100),
1912 ("25%", resultcount * 25 // 100),
1906 ("50%", resultcount * 70 // 100),
1913 ("50%", resultcount * 70 // 100),
1907 ("75%", resultcount * 75 // 100),
1914 ("75%", resultcount * 75 // 100),
1908 ("90%", resultcount * 90 // 100),
1915 ("90%", resultcount * 90 // 100),
1909 ("95%", resultcount * 95 // 100),
1916 ("95%", resultcount * 95 // 100),
1910 ("99%", resultcount * 99 // 100),
1917 ("99%", resultcount * 99 // 100),
1911 ("99.9%", resultcount * 999 // 1000),
1918 ("99.9%", resultcount * 999 // 1000),
1912 ("99.99%", resultcount * 9999 // 10000),
1919 ("99.99%", resultcount * 9999 // 10000),
1913 ("99.999%", resultcount * 99999 // 100000),
1920 ("99.999%", resultcount * 99999 // 100000),
1914 ("max", -1),
1921 ("max", -1),
1915 ]
1922 ]
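# Worked example (illustration only, values are hypothetical): with
# resultcount == 200, the expressions above select the sorted entries at
# indices 0, 20, 50, 100, 150, 180, 190, 198, 199, 199, 199 and -1;
# e.g. 200 * 90 // 100 == 180 is the 90th percentile and -1 is the slowest write.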
1916 if not ui.quiet:
1923 if not ui.quiet:
1917 for name, idx in relevants:
1924 for name, idx in relevants:
1918 data = results[idx]
1925 data = results[idx]
1919 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1926 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1920 formatone(fm, data[1], title=title, displayall=displayall)
1927 formatone(fm, data[1], title=title, displayall=displayall)
1921
1928
1922 # XXX summing that many floats will not be very precise; we ignore this fact
1929 # XXX summing that many floats will not be very precise; we ignore this fact
1923 # for now
1930 # for now
1924 totaltime = []
1931 totaltime = []
1925 for item in allresults:
1932 for item in allresults:
1926 totaltime.append((sum(x[1][0] for x in item),
1933 totaltime.append((sum(x[1][0] for x in item),
1927 sum(x[1][1] for x in item),
1934 sum(x[1][1] for x in item),
1928 sum(x[1][2] for x in item),)
1935 sum(x[1][2] for x in item),)
1929 )
1936 )
1930 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1937 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1931 displayall=displayall)
1938 displayall=displayall)
1932 fm.end()
1939 fm.end()
1933
1940
1934 class _faketr(object):
1941 class _faketr(object):
1935 def add(s, x, y, z=None):
1942 def add(s, x, y, z=None):
1936 return None
1943 return None
1937
1944
1938 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1945 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1939 lazydeltabase=True, clearcaches=True):
1946 lazydeltabase=True, clearcaches=True):
1940 timings = []
1947 timings = []
1941 tr = _faketr()
1948 tr = _faketr()
1942 with _temprevlog(ui, orig, startrev) as dest:
1949 with _temprevlog(ui, orig, startrev) as dest:
1943 dest._lazydeltabase = lazydeltabase
1950 dest._lazydeltabase = lazydeltabase
1944 revs = list(orig.revs(startrev, stoprev))
1951 revs = list(orig.revs(startrev, stoprev))
1945 total = len(revs)
1952 total = len(revs)
1946 topic = 'adding'
1953 topic = 'adding'
1947 if runidx is not None:
1954 if runidx is not None:
1948 topic += ' (run #%d)' % runidx
1955 topic += ' (run #%d)' % runidx
1949 # Support both old and new progress API
1956 # Support both old and new progress API
1950 if util.safehasattr(ui, 'makeprogress'):
1957 if util.safehasattr(ui, 'makeprogress'):
1951 progress = ui.makeprogress(topic, unit='revs', total=total)
1958 progress = ui.makeprogress(topic, unit='revs', total=total)
1952 def updateprogress(pos):
1959 def updateprogress(pos):
1953 progress.update(pos)
1960 progress.update(pos)
1954 def completeprogress():
1961 def completeprogress():
1955 progress.complete()
1962 progress.complete()
1956 else:
1963 else:
1957 def updateprogress(pos):
1964 def updateprogress(pos):
1958 ui.progress(topic, pos, unit='revs', total=total)
1965 ui.progress(topic, pos, unit='revs', total=total)
1959 def completeprogress():
1966 def completeprogress():
1960 ui.progress(topic, None, unit='revs', total=total)
1967 ui.progress(topic, None, unit='revs', total=total)
1961
1968
1962 for idx, rev in enumerate(revs):
1969 for idx, rev in enumerate(revs):
1963 updateprogress(idx)
1970 updateprogress(idx)
1964 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1971 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1965 if clearcaches:
1972 if clearcaches:
1966 dest.index.clearcaches()
1973 dest.index.clearcaches()
1967 dest.clearcaches()
1974 dest.clearcaches()
1968 with timeone() as r:
1975 with timeone() as r:
1969 dest.addrawrevision(*addargs, **addkwargs)
1976 dest.addrawrevision(*addargs, **addkwargs)
1970 timings.append((rev, r[0]))
1977 timings.append((rev, r[0]))
1971 updateprogress(total)
1978 updateprogress(total)
1972 completeprogress()
1979 completeprogress()
1973 return timings
1980 return timings
1974
1981
1975 def _getrevisionseed(orig, rev, tr, source):
1982 def _getrevisionseed(orig, rev, tr, source):
1976 from mercurial.node import nullid
1983 from mercurial.node import nullid
1977
1984
1978 linkrev = orig.linkrev(rev)
1985 linkrev = orig.linkrev(rev)
1979 node = orig.node(rev)
1986 node = orig.node(rev)
1980 p1, p2 = orig.parents(node)
1987 p1, p2 = orig.parents(node)
1981 flags = orig.flags(rev)
1988 flags = orig.flags(rev)
1982 cachedelta = None
1989 cachedelta = None
1983 text = None
1990 text = None
1984
1991
1985 if source == b'full':
1992 if source == b'full':
1986 text = orig.revision(rev)
1993 text = orig.revision(rev)
1987 elif source == b'parent-1':
1994 elif source == b'parent-1':
1988 baserev = orig.rev(p1)
1995 baserev = orig.rev(p1)
1989 cachedelta = (baserev, orig.revdiff(p1, rev))
1996 cachedelta = (baserev, orig.revdiff(p1, rev))
1990 elif source == b'parent-2':
1997 elif source == b'parent-2':
1991 parent = p2
1998 parent = p2
1992 if p2 == nullid:
1999 if p2 == nullid:
1993 parent = p1
2000 parent = p1
1994 baserev = orig.rev(parent)
2001 baserev = orig.rev(parent)
1995 cachedelta = (baserev, orig.revdiff(parent, rev))
2002 cachedelta = (baserev, orig.revdiff(parent, rev))
1996 elif source == b'parent-smallest':
2003 elif source == b'parent-smallest':
1997 p1diff = orig.revdiff(p1, rev)
2004 p1diff = orig.revdiff(p1, rev)
1998 parent = p1
2005 parent = p1
1999 diff = p1diff
2006 diff = p1diff
2000 if p2 != nullid:
2007 if p2 != nullid:
2001 p2diff = orig.revdiff(p2, rev)
2008 p2diff = orig.revdiff(p2, rev)
2002 if len(p1diff) > len(p2diff):
2009 if len(p1diff) > len(p2diff):
2003 parent = p2
2010 parent = p2
2004 diff = p2diff
2011 diff = p2diff
2005 baserev = orig.rev(parent)
2012 baserev = orig.rev(parent)
2006 cachedelta = (baserev, diff)
2013 cachedelta = (baserev, diff)
2007 elif source == b'storage':
2014 elif source == b'storage':
2008 baserev = orig.deltaparent(rev)
2015 baserev = orig.deltaparent(rev)
2009 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2016 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2010
2017
2011 return ((text, tr, linkrev, p1, p2),
2018 return ((text, tr, linkrev, p1, p2),
2012 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2019 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2013
2020
2014 @contextlib.contextmanager
2021 @contextlib.contextmanager
2015 def _temprevlog(ui, orig, truncaterev):
2022 def _temprevlog(ui, orig, truncaterev):
2016 from mercurial import vfs as vfsmod
2023 from mercurial import vfs as vfsmod
2017
2024
2018 if orig._inline:
2025 if orig._inline:
2019 raise error.Abort('not supporting inline revlog (yet)')
2026 raise error.Abort('not supporting inline revlog (yet)')
2020
2027
2021 origindexpath = orig.opener.join(orig.indexfile)
2028 origindexpath = orig.opener.join(orig.indexfile)
2022 origdatapath = orig.opener.join(orig.datafile)
2029 origdatapath = orig.opener.join(orig.datafile)
2023 indexname = 'revlog.i'
2030 indexname = 'revlog.i'
2024 dataname = 'revlog.d'
2031 dataname = 'revlog.d'
2025
2032
2026 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2033 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2027 try:
2034 try:
2028 # copy the data file in a temporary directory
2035 # copy the data file in a temporary directory
2029 ui.debug('copying data in %s\n' % tmpdir)
2036 ui.debug('copying data in %s\n' % tmpdir)
2030 destindexpath = os.path.join(tmpdir, 'revlog.i')
2037 destindexpath = os.path.join(tmpdir, 'revlog.i')
2031 destdatapath = os.path.join(tmpdir, 'revlog.d')
2038 destdatapath = os.path.join(tmpdir, 'revlog.d')
2032 shutil.copyfile(origindexpath, destindexpath)
2039 shutil.copyfile(origindexpath, destindexpath)
2033 shutil.copyfile(origdatapath, destdatapath)
2040 shutil.copyfile(origdatapath, destdatapath)
2034
2041
2035 # remove the data we want to add again
2042 # remove the data we want to add again
2036 ui.debug('truncating data to be rewritten\n')
2043 ui.debug('truncating data to be rewritten\n')
2037 with open(destindexpath, 'ab') as index:
2044 with open(destindexpath, 'ab') as index:
2038 index.seek(0)
2045 index.seek(0)
2039 index.truncate(truncaterev * orig._io.size)
2046 index.truncate(truncaterev * orig._io.size)
2040 with open(destdatapath, 'ab') as data:
2047 with open(destdatapath, 'ab') as data:
2041 data.seek(0)
2048 data.seek(0)
2042 data.truncate(orig.start(truncaterev))
2049 data.truncate(orig.start(truncaterev))
2043
2050
2044 # instantiate a new revlog from the temporary copy
2051 # instantiate a new revlog from the temporary copy
2045 ui.debug('instantiating the new revlog\n')
2052 ui.debug('instantiating the new revlog\n')
2046 vfs = vfsmod.vfs(tmpdir)
2053 vfs = vfsmod.vfs(tmpdir)
2047 vfs.options = getattr(orig.opener, 'options', None)
2054 vfs.options = getattr(orig.opener, 'options', None)
2048
2055
2049 dest = revlog.revlog(vfs,
2056 dest = revlog.revlog(vfs,
2050 indexfile=indexname,
2057 indexfile=indexname,
2051 datafile=dataname)
2058 datafile=dataname)
2052 if dest._inline:
2059 if dest._inline:
2053 raise error.Abort('not supporting inline revlog (yet)')
2060 raise error.Abort('not supporting inline revlog (yet)')
2054 # make sure internals are initialized
2061 # make sure internals are initialized
2055 dest.revision(len(dest) - 1)
2062 dest.revision(len(dest) - 1)
2056 yield dest
2063 yield dest
2057 del dest, vfs
2064 del dest, vfs
2058 finally:
2065 finally:
2059 shutil.rmtree(tmpdir, True)
2066 shutil.rmtree(tmpdir, True)
2060
2067
2061 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2068 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2062 [(b'e', b'engines', b'', b'compression engines to use'),
2069 [(b'e', b'engines', b'', b'compression engines to use'),
2063 (b's', b'startrev', 0, b'revision to start at')],
2070 (b's', b'startrev', 0, b'revision to start at')],
2064 b'-c|-m|FILE')
2071 b'-c|-m|FILE')
2065 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2072 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2066 """Benchmark operations on revlog chunks.
2073 """Benchmark operations on revlog chunks.
2067
2074
2068 Logically, each revlog is a collection of fulltext revisions. However,
2075 Logically, each revlog is a collection of fulltext revisions. However,
2069 stored within each revlog are "chunks" of possibly compressed data. This
2076 stored within each revlog are "chunks" of possibly compressed data. This
2070 data needs to be read and decompressed or compressed and written.
2077 data needs to be read and decompressed or compressed and written.
2071
2078
2072 This command measures the time it takes to read+decompress and recompress
2079 This command measures the time it takes to read+decompress and recompress
2073 chunks in a revlog. It effectively isolates I/O and compression performance.
2080 chunks in a revlog. It effectively isolates I/O and compression performance.
2074 For measurements of higher-level operations like resolving revisions,
2081 For measurements of higher-level operations like resolving revisions,
2075 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2082 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2076 """
2083 """
2077 opts = _byteskwargs(opts)
2084 opts = _byteskwargs(opts)
2078
2085
2079 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2086 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2080
2087
2081 # _chunkraw was renamed to _getsegmentforrevs.
2088 # _chunkraw was renamed to _getsegmentforrevs.
2082 try:
2089 try:
2083 segmentforrevs = rl._getsegmentforrevs
2090 segmentforrevs = rl._getsegmentforrevs
2084 except AttributeError:
2091 except AttributeError:
2085 segmentforrevs = rl._chunkraw
2092 segmentforrevs = rl._chunkraw
2086
2093
2087 # Verify engines argument.
2094 # Verify engines argument.
2088 if engines:
2095 if engines:
2089 engines = set(e.strip() for e in engines.split(b','))
2096 engines = set(e.strip() for e in engines.split(b','))
2090 for engine in engines:
2097 for engine in engines:
2091 try:
2098 try:
2092 util.compressionengines[engine]
2099 util.compressionengines[engine]
2093 except KeyError:
2100 except KeyError:
2094 raise error.Abort(b'unknown compression engine: %s' % engine)
2101 raise error.Abort(b'unknown compression engine: %s' % engine)
2095 else:
2102 else:
2096 engines = []
2103 engines = []
2097 for e in util.compengines:
2104 for e in util.compengines:
2098 engine = util.compengines[e]
2105 engine = util.compengines[e]
2099 try:
2106 try:
2100 if engine.available():
2107 if engine.available():
2101 engine.revlogcompressor().compress(b'dummy')
2108 engine.revlogcompressor().compress(b'dummy')
2102 engines.append(e)
2109 engines.append(e)
2103 except NotImplementedError:
2110 except NotImplementedError:
2104 pass
2111 pass
2105
2112
2106 revs = list(rl.revs(startrev, len(rl) - 1))
2113 revs = list(rl.revs(startrev, len(rl) - 1))
2107
2114
2108 def rlfh(rl):
2115 def rlfh(rl):
2109 if rl._inline:
2116 if rl._inline:
2110 return getsvfs(repo)(rl.indexfile)
2117 return getsvfs(repo)(rl.indexfile)
2111 else:
2118 else:
2112 return getsvfs(repo)(rl.datafile)
2119 return getsvfs(repo)(rl.datafile)
2113
2120
2114 def doread():
2121 def doread():
2115 rl.clearcaches()
2122 rl.clearcaches()
2116 for rev in revs:
2123 for rev in revs:
2117 segmentforrevs(rev, rev)
2124 segmentforrevs(rev, rev)
2118
2125
2119 def doreadcachedfh():
2126 def doreadcachedfh():
2120 rl.clearcaches()
2127 rl.clearcaches()
2121 fh = rlfh(rl)
2128 fh = rlfh(rl)
2122 for rev in revs:
2129 for rev in revs:
2123 segmentforrevs(rev, rev, df=fh)
2130 segmentforrevs(rev, rev, df=fh)
2124
2131
2125 def doreadbatch():
2132 def doreadbatch():
2126 rl.clearcaches()
2133 rl.clearcaches()
2127 segmentforrevs(revs[0], revs[-1])
2134 segmentforrevs(revs[0], revs[-1])
2128
2135
2129 def doreadbatchcachedfh():
2136 def doreadbatchcachedfh():
2130 rl.clearcaches()
2137 rl.clearcaches()
2131 fh = rlfh(rl)
2138 fh = rlfh(rl)
2132 segmentforrevs(revs[0], revs[-1], df=fh)
2139 segmentforrevs(revs[0], revs[-1], df=fh)
2133
2140
2134 def dochunk():
2141 def dochunk():
2135 rl.clearcaches()
2142 rl.clearcaches()
2136 fh = rlfh(rl)
2143 fh = rlfh(rl)
2137 for rev in revs:
2144 for rev in revs:
2138 rl._chunk(rev, df=fh)
2145 rl._chunk(rev, df=fh)
2139
2146
2140 chunks = [None]
2147 chunks = [None]
2141
2148
2142 def dochunkbatch():
2149 def dochunkbatch():
2143 rl.clearcaches()
2150 rl.clearcaches()
2144 fh = rlfh(rl)
2151 fh = rlfh(rl)
2145 # Save chunks as a side-effect.
2152 # Save chunks as a side-effect.
2146 chunks[0] = rl._chunks(revs, df=fh)
2153 chunks[0] = rl._chunks(revs, df=fh)
2147
2154
2148 def docompress(compressor):
2155 def docompress(compressor):
2149 rl.clearcaches()
2156 rl.clearcaches()
2150
2157
2151 try:
2158 try:
2152 # Swap in the requested compression engine.
2159 # Swap in the requested compression engine.
2153 oldcompressor = rl._compressor
2160 oldcompressor = rl._compressor
2154 rl._compressor = compressor
2161 rl._compressor = compressor
2155 for chunk in chunks[0]:
2162 for chunk in chunks[0]:
2156 rl.compress(chunk)
2163 rl.compress(chunk)
2157 finally:
2164 finally:
2158 rl._compressor = oldcompressor
2165 rl._compressor = oldcompressor
2159
2166
2160 benches = [
2167 benches = [
2161 (lambda: doread(), b'read'),
2168 (lambda: doread(), b'read'),
2162 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2169 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2163 (lambda: doreadbatch(), b'read batch'),
2170 (lambda: doreadbatch(), b'read batch'),
2164 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2171 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2165 (lambda: dochunk(), b'chunk'),
2172 (lambda: dochunk(), b'chunk'),
2166 (lambda: dochunkbatch(), b'chunk batch'),
2173 (lambda: dochunkbatch(), b'chunk batch'),
2167 ]
2174 ]
2168
2175
2169 for engine in sorted(engines):
2176 for engine in sorted(engines):
2170 compressor = util.compengines[engine].revlogcompressor()
2177 compressor = util.compengines[engine].revlogcompressor()
2171 benches.append((functools.partial(docompress, compressor),
2178 benches.append((functools.partial(docompress, compressor),
2172 b'compress w/ %s' % engine))
2179 b'compress w/ %s' % engine))
2173
2180
2174 for fn, title in benches:
2181 for fn, title in benches:
2175 timer, fm = gettimer(ui, opts)
2182 timer, fm = gettimer(ui, opts)
2176 timer(fn, title=title)
2183 timer(fn, title=title)
2177 fm.end()
2184 fm.end()
2178
2185
2179 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2186 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2180 [(b'', b'cache', False, b'use caches instead of clearing')],
2187 [(b'', b'cache', False, b'use caches instead of clearing')],
2181 b'-c|-m|FILE REV')
2188 b'-c|-m|FILE REV')
2182 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2189 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2183 """Benchmark obtaining a revlog revision.
2190 """Benchmark obtaining a revlog revision.
2184
2191
2185 Obtaining a revlog revision consists of roughly the following steps:
2192 Obtaining a revlog revision consists of roughly the following steps:
2186
2193
2187 1. Compute the delta chain
2194 1. Compute the delta chain
2188 2. Slice the delta chain if applicable
2195 2. Slice the delta chain if applicable
2189 3. Obtain the raw chunks for that delta chain
2196 3. Obtain the raw chunks for that delta chain
2190 4. Decompress each raw chunk
2197 4. Decompress each raw chunk
2191 5. Apply binary patches to obtain fulltext
2198 5. Apply binary patches to obtain fulltext
2192 6. Verify hash of fulltext
2199 6. Verify hash of fulltext
2193
2200
2194 This command measures the time spent in each of these phases.
2201 This command measures the time spent in each of these phases.
2195 """
2202 """
2196 opts = _byteskwargs(opts)
2203 opts = _byteskwargs(opts)
2197
2204
2198 if opts.get(b'changelog') or opts.get(b'manifest'):
2205 if opts.get(b'changelog') or opts.get(b'manifest'):
2199 file_, rev = None, file_
2206 file_, rev = None, file_
2200 elif rev is None:
2207 elif rev is None:
2201 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2208 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2202
2209
2203 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2210 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2204
2211
2205 # _chunkraw was renamed to _getsegmentforrevs.
2212 # _chunkraw was renamed to _getsegmentforrevs.
2206 try:
2213 try:
2207 segmentforrevs = r._getsegmentforrevs
2214 segmentforrevs = r._getsegmentforrevs
2208 except AttributeError:
2215 except AttributeError:
2209 segmentforrevs = r._chunkraw
2216 segmentforrevs = r._chunkraw
2210
2217
2211 node = r.lookup(rev)
2218 node = r.lookup(rev)
2212 rev = r.rev(node)
2219 rev = r.rev(node)
2213
2220
2214 def getrawchunks(data, chain):
2221 def getrawchunks(data, chain):
2215 start = r.start
2222 start = r.start
2216 length = r.length
2223 length = r.length
2217 inline = r._inline
2224 inline = r._inline
2218 iosize = r._io.size
2225 iosize = r._io.size
2219 buffer = util.buffer
2226 buffer = util.buffer
2220
2227
2221 chunks = []
2228 chunks = []
2222 ladd = chunks.append
2229 ladd = chunks.append
2223 for idx, item in enumerate(chain):
2230 for idx, item in enumerate(chain):
2224 offset = start(item[0])
2231 offset = start(item[0])
2225 bits = data[idx]
2232 bits = data[idx]
2226 for rev in item:
2233 for rev in item:
2227 chunkstart = start(rev)
2234 chunkstart = start(rev)
2228 if inline:
2235 if inline:
2229 chunkstart += (rev + 1) * iosize
2236 chunkstart += (rev + 1) * iosize
2230 chunklength = length(rev)
2237 chunklength = length(rev)
2231 ladd(buffer(bits, chunkstart - offset, chunklength))
2238 ladd(buffer(bits, chunkstart - offset, chunklength))
2232
2239
2233 return chunks
2240 return chunks
2234
2241
2235 def dodeltachain(rev):
2242 def dodeltachain(rev):
2236 if not cache:
2243 if not cache:
2237 r.clearcaches()
2244 r.clearcaches()
2238 r._deltachain(rev)
2245 r._deltachain(rev)
2239
2246
2240 def doread(chain):
2247 def doread(chain):
2241 if not cache:
2248 if not cache:
2242 r.clearcaches()
2249 r.clearcaches()
2243 for item in slicedchain:
2250 for item in slicedchain:
2244 segmentforrevs(item[0], item[-1])
2251 segmentforrevs(item[0], item[-1])
2245
2252
2246 def doslice(r, chain, size):
2253 def doslice(r, chain, size):
2247 for s in slicechunk(r, chain, targetsize=size):
2254 for s in slicechunk(r, chain, targetsize=size):
2248 pass
2255 pass
2249
2256
2250 def dorawchunks(data, chain):
2257 def dorawchunks(data, chain):
2251 if not cache:
2258 if not cache:
2252 r.clearcaches()
2259 r.clearcaches()
2253 getrawchunks(data, chain)
2260 getrawchunks(data, chain)
2254
2261
2255 def dodecompress(chunks):
2262 def dodecompress(chunks):
2256 decomp = r.decompress
2263 decomp = r.decompress
2257 for chunk in chunks:
2264 for chunk in chunks:
2258 decomp(chunk)
2265 decomp(chunk)
2259
2266
2260 def dopatch(text, bins):
2267 def dopatch(text, bins):
2261 if not cache:
2268 if not cache:
2262 r.clearcaches()
2269 r.clearcaches()
2263 mdiff.patches(text, bins)
2270 mdiff.patches(text, bins)
2264
2271
2265 def dohash(text):
2272 def dohash(text):
2266 if not cache:
2273 if not cache:
2267 r.clearcaches()
2274 r.clearcaches()
2268 r.checkhash(text, node, rev=rev)
2275 r.checkhash(text, node, rev=rev)
2269
2276
2270 def dorevision():
2277 def dorevision():
2271 if not cache:
2278 if not cache:
2272 r.clearcaches()
2279 r.clearcaches()
2273 r.revision(node)
2280 r.revision(node)
2274
2281
2275 try:
2282 try:
2276 from mercurial.revlogutils.deltas import slicechunk
2283 from mercurial.revlogutils.deltas import slicechunk
2277 except ImportError:
2284 except ImportError:
2278 slicechunk = getattr(revlog, '_slicechunk', None)
2285 slicechunk = getattr(revlog, '_slicechunk', None)
2279
2286
2280 size = r.length(rev)
2287 size = r.length(rev)
2281 chain = r._deltachain(rev)[0]
2288 chain = r._deltachain(rev)[0]
2282 if not getattr(r, '_withsparseread', False):
2289 if not getattr(r, '_withsparseread', False):
2283 slicedchain = (chain,)
2290 slicedchain = (chain,)
2284 else:
2291 else:
2285 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2292 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2286 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2293 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2287 rawchunks = getrawchunks(data, slicedchain)
2294 rawchunks = getrawchunks(data, slicedchain)
2288 bins = r._chunks(chain)
2295 bins = r._chunks(chain)
2289 text = bytes(bins[0])
2296 text = bytes(bins[0])
2290 bins = bins[1:]
2297 bins = bins[1:]
2291 text = mdiff.patches(text, bins)
2298 text = mdiff.patches(text, bins)
2292
2299
2293 benches = [
2300 benches = [
2294 (lambda: dorevision(), b'full'),
2301 (lambda: dorevision(), b'full'),
2295 (lambda: dodeltachain(rev), b'deltachain'),
2302 (lambda: dodeltachain(rev), b'deltachain'),
2296 (lambda: doread(chain), b'read'),
2303 (lambda: doread(chain), b'read'),
2297 ]
2304 ]
2298
2305
2299 if getattr(r, '_withsparseread', False):
2306 if getattr(r, '_withsparseread', False):
2300 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2307 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2301 benches.append(slicing)
2308 benches.append(slicing)
2302
2309
2303 benches.extend([
2310 benches.extend([
2304 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2311 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2305 (lambda: dodecompress(rawchunks), b'decompress'),
2312 (lambda: dodecompress(rawchunks), b'decompress'),
2306 (lambda: dopatch(text, bins), b'patch'),
2313 (lambda: dopatch(text, bins), b'patch'),
2307 (lambda: dohash(text), b'hash'),
2314 (lambda: dohash(text), b'hash'),
2308 ])
2315 ])
2309
2316
2310 timer, fm = gettimer(ui, opts)
2317 timer, fm = gettimer(ui, opts)
2311 for fn, title in benches:
2318 for fn, title in benches:
2312 timer(fn, title=title)
2319 timer(fn, title=title)
2313 fm.end()
2320 fm.end()
2314
2321
2315 @command(b'perfrevset',
2322 @command(b'perfrevset',
2316 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2323 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2317 (b'', b'contexts', False, b'obtain changectx for each revision')]
2324 (b'', b'contexts', False, b'obtain changectx for each revision')]
2318 + formatteropts, b"REVSET")
2325 + formatteropts, b"REVSET")
2319 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2326 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2320 """benchmark the execution time of a revset
2327 """benchmark the execution time of a revset
2321
2328
2322 Use the --clear option if you need to evaluate the impact of building the
2329 Use the --clear option if you need to evaluate the impact of building the
2323 volatile revision set caches on revset execution. The volatile caches hold
2330 volatile revision set caches on revset execution. The volatile caches hold
2324 data related to filtered and obsolete revisions."""
2331 data related to filtered and obsolete revisions."""
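# Hypothetical example invocation (for illustration only; the revset
# expression is a placeholder):
#   $ hg perfrevset 'draft() and head()' --contexts --clear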
2325 opts = _byteskwargs(opts)
2332 opts = _byteskwargs(opts)
2326
2333
2327 timer, fm = gettimer(ui, opts)
2334 timer, fm = gettimer(ui, opts)
2328 def d():
2335 def d():
2329 if clear:
2336 if clear:
2330 repo.invalidatevolatilesets()
2337 repo.invalidatevolatilesets()
2331 if contexts:
2338 if contexts:
2332 for ctx in repo.set(expr): pass
2339 for ctx in repo.set(expr): pass
2333 else:
2340 else:
2334 for r in repo.revs(expr): pass
2341 for r in repo.revs(expr): pass
2335 timer(d)
2342 timer(d)
2336 fm.end()
2343 fm.end()
2337
2344
2338 @command(b'perfvolatilesets',
2345 @command(b'perfvolatilesets',
2339 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2346 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2340 ] + formatteropts)
2347 ] + formatteropts)
2341 def perfvolatilesets(ui, repo, *names, **opts):
2348 def perfvolatilesets(ui, repo, *names, **opts):
2342 """benchmark the computation of various volatile set
2349 """benchmark the computation of various volatile set
2343
2350
2344 Volatile sets compute elements related to filtering and obsolescence."""
2351 Volatile sets compute elements related to filtering and obsolescence."""
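# Hypothetical example invocation (for illustration only; 'obsolete' is one of
# the cached set names, others may be passed as positional arguments):
#   $ hg perfvolatilesets obsolete --clear-obsstore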
2345 opts = _byteskwargs(opts)
2352 opts = _byteskwargs(opts)
2346 timer, fm = gettimer(ui, opts)
2353 timer, fm = gettimer(ui, opts)
2347 repo = repo.unfiltered()
2354 repo = repo.unfiltered()
2348
2355
2349 def getobs(name):
2356 def getobs(name):
2350 def d():
2357 def d():
2351 repo.invalidatevolatilesets()
2358 repo.invalidatevolatilesets()
2352 if opts[b'clear_obsstore']:
2359 if opts[b'clear_obsstore']:
2353 clearfilecache(repo, b'obsstore')
2360 clearfilecache(repo, b'obsstore')
2354 obsolete.getrevs(repo, name)
2361 obsolete.getrevs(repo, name)
2355 return d
2362 return d
2356
2363
2357 allobs = sorted(obsolete.cachefuncs)
2364 allobs = sorted(obsolete.cachefuncs)
2358 if names:
2365 if names:
2359 allobs = [n for n in allobs if n in names]
2366 allobs = [n for n in allobs if n in names]
2360
2367
2361 for name in allobs:
2368 for name in allobs:
2362 timer(getobs(name), title=name)
2369 timer(getobs(name), title=name)
2363
2370
2364 def getfiltered(name):
2371 def getfiltered(name):
2365 def d():
2372 def d():
2366 repo.invalidatevolatilesets()
2373 repo.invalidatevolatilesets()
2367 if opts[b'clear_obsstore']:
2374 if opts[b'clear_obsstore']:
2368 clearfilecache(repo, b'obsstore')
2375 clearfilecache(repo, b'obsstore')
2369 repoview.filterrevs(repo, name)
2376 repoview.filterrevs(repo, name)
2370 return d
2377 return d
2371
2378
2372 allfilter = sorted(repoview.filtertable)
2379 allfilter = sorted(repoview.filtertable)
2373 if names:
2380 if names:
2374 allfilter = [n for n in allfilter if n in names]
2381 allfilter = [n for n in allfilter if n in names]
2375
2382
2376 for name in allfilter:
2383 for name in allfilter:
2377 timer(getfiltered(name), title=name)
2384 timer(getfiltered(name), title=name)
2378 fm.end()
2385 fm.end()
2379
2386
2380 @command(b'perfbranchmap',
2387 @command(b'perfbranchmap',
2381 [(b'f', b'full', False,
2388 [(b'f', b'full', False,
2382 b'include the build time of the subsets'),
2389 b'include the build time of the subsets'),
2383 (b'', b'clear-revbranch', False,
2390 (b'', b'clear-revbranch', False,
2384 b'purge the revbranch cache between computation'),
2391 b'purge the revbranch cache between computation'),
2385 ] + formatteropts)
2392 ] + formatteropts)
2386 def perfbranchmap(ui, repo, *filternames, **opts):
2393 def perfbranchmap(ui, repo, *filternames, **opts):
2387 """benchmark the update of a branchmap
2394 """benchmark the update of a branchmap
2388
2395
2389 This benchmarks the full repo.branchmap() call with cache reading and writing disabled.
2396 This benchmarks the full repo.branchmap() call with cache reading and writing disabled.
2390 """
2397 """
2391 opts = _byteskwargs(opts)
2398 opts = _byteskwargs(opts)
2392 full = opts.get(b"full", False)
2399 full = opts.get(b"full", False)
2393 clear_revbranch = opts.get(b"clear_revbranch", False)
2400 clear_revbranch = opts.get(b"clear_revbranch", False)
2394 timer, fm = gettimer(ui, opts)
2401 timer, fm = gettimer(ui, opts)
2395 def getbranchmap(filtername):
2402 def getbranchmap(filtername):
2396 """generate a benchmark function for the filtername"""
2403 """generate a benchmark function for the filtername"""
2397 if filtername is None:
2404 if filtername is None:
2398 view = repo
2405 view = repo
2399 else:
2406 else:
2400 view = repo.filtered(filtername)
2407 view = repo.filtered(filtername)
2401 if util.safehasattr(view._branchcaches, '_per_filter'):
2408 if util.safehasattr(view._branchcaches, '_per_filter'):
2402 filtered = view._branchcaches._per_filter
2409 filtered = view._branchcaches._per_filter
2403 else:
2410 else:
2404 # older versions
2411 # older versions
2405 filtered = view._branchcaches
2412 filtered = view._branchcaches
2406 def d():
2413 def d():
2407 if clear_revbranch:
2414 if clear_revbranch:
2408 repo.revbranchcache()._clear()
2415 repo.revbranchcache()._clear()
2409 if full:
2416 if full:
2410 view._branchcaches.clear()
2417 view._branchcaches.clear()
2411 else:
2418 else:
2412 filtered.pop(filtername, None)
2419 filtered.pop(filtername, None)
2413 view.branchmap()
2420 view.branchmap()
2414 return d
2421 return d
2415 # order the filters from the smaller subsets to the bigger ones
2422 # order the filters from the smaller subsets to the bigger ones
2416 possiblefilters = set(repoview.filtertable)
2423 possiblefilters = set(repoview.filtertable)
2417 if filternames:
2424 if filternames:
2418 possiblefilters &= set(filternames)
2425 possiblefilters &= set(filternames)
2419 subsettable = getbranchmapsubsettable()
2426 subsettable = getbranchmapsubsettable()
2420 allfilters = []
2427 allfilters = []
2421 while possiblefilters:
2428 while possiblefilters:
2422 for name in possiblefilters:
2429 for name in possiblefilters:
2423 subset = subsettable.get(name)
2430 subset = subsettable.get(name)
2424 if subset not in possiblefilters:
2431 if subset not in possiblefilters:
2425 break
2432 break
2426 else:
2433 else:
2427 assert False, b'subset cycle %s!' % possiblefilters
2434 assert False, b'subset cycle %s!' % possiblefilters
2428 allfilters.append(name)
2435 allfilters.append(name)
2429 possiblefilters.remove(name)
2436 possiblefilters.remove(name)
2430
2437
2431 # warm the cache
2438 # warm the cache
2432 if not full:
2439 if not full:
2433 for name in allfilters:
2440 for name in allfilters:
2434 repo.filtered(name).branchmap()
2441 repo.filtered(name).branchmap()
2435 if not filternames or b'unfiltered' in filternames:
2442 if not filternames or b'unfiltered' in filternames:
2436 # add unfiltered
2443 # add unfiltered
2437 allfilters.append(None)
2444 allfilters.append(None)
2438
2445
2439 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2446 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2440 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2447 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2441 branchcacheread.set(classmethod(lambda *args: None))
2448 branchcacheread.set(classmethod(lambda *args: None))
2442 else:
2449 else:
2443 # older versions
2450 # older versions
2444 branchcacheread = safeattrsetter(branchmap, b'read')
2451 branchcacheread = safeattrsetter(branchmap, b'read')
2445 branchcacheread.set(lambda *args: None)
2452 branchcacheread.set(lambda *args: None)
2446 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2453 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2447 branchcachewrite.set(lambda *args: None)
2454 branchcachewrite.set(lambda *args: None)
2448 try:
2455 try:
2449 for name in allfilters:
2456 for name in allfilters:
2450 printname = name
2457 printname = name
2451 if name is None:
2458 if name is None:
2452 printname = b'unfiltered'
2459 printname = b'unfiltered'
2453 timer(getbranchmap(name), title=str(printname))
2460 timer(getbranchmap(name), title=str(printname))
2454 finally:
2461 finally:
2455 branchcacheread.restore()
2462 branchcacheread.restore()
2456 branchcachewrite.restore()
2463 branchcachewrite.restore()
2457 fm.end()
2464 fm.end()
2458
2465
2459 @command(b'perfbranchmapupdate', [
2466 @command(b'perfbranchmapupdate', [
2460 (b'', b'base', [], b'subset of revisions to start from'),
2467 (b'', b'base', [], b'subset of revisions to start from'),
2461 (b'', b'target', [], b'subset of revisions to end with'),
2468 (b'', b'target', [], b'subset of revisions to end with'),
2462 (b'', b'clear-caches', False, b'clear the caches between each run')
2469 (b'', b'clear-caches', False, b'clear the caches between each run')
2463 ] + formatteropts)
2470 ] + formatteropts)
2464 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2471 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2465 """benchmark branchmap update from for <base> revs to <target> revs
2472 """benchmark branchmap update from for <base> revs to <target> revs
2466
2473
2467 If `--clear-caches` is passed, the following items will be reset before
2474 If `--clear-caches` is passed, the following items will be reset before
2468 each update:
2475 each update:
2469 * the changelog instance and associated indexes
2476 * the changelog instance and associated indexes
2470 * the rev-branch-cache instance
2477 * the rev-branch-cache instance
2471
2478
2472 Examples:
2479 Examples:
2473
2480
2474 # update for the last revision only
2481 # update for the last revision only
2475 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2482 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2476
2483
2477 # update for a change that comes with a new branch
2484 # update for a change that comes with a new branch
2478 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2485 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2479 """
2486 """
2480 from mercurial import branchmap
2487 from mercurial import branchmap
2481 from mercurial import repoview
2488 from mercurial import repoview
2482 opts = _byteskwargs(opts)
2489 opts = _byteskwargs(opts)
2483 timer, fm = gettimer(ui, opts)
2490 timer, fm = gettimer(ui, opts)
2484 clearcaches = opts[b'clear_caches']
2491 clearcaches = opts[b'clear_caches']
2485 unfi = repo.unfiltered()
2492 unfi = repo.unfiltered()
2486 x = [None] # used to pass data between closure
2493 x = [None] # used to pass data between closure
2487
2494
2488 # we use a `list` here to avoid possible side effect from smartset
2495 # we use a `list` here to avoid possible side effect from smartset
2489 baserevs = list(scmutil.revrange(repo, base))
2496 baserevs = list(scmutil.revrange(repo, base))
2490 targetrevs = list(scmutil.revrange(repo, target))
2497 targetrevs = list(scmutil.revrange(repo, target))
2491 if not baserevs:
2498 if not baserevs:
2492 raise error.Abort(b'no revisions selected for --base')
2499 raise error.Abort(b'no revisions selected for --base')
2493 if not targetrevs:
2500 if not targetrevs:
2494 raise error.Abort(b'no revisions selected for --target')
2501 raise error.Abort(b'no revisions selected for --target')
2495
2502
2496 # make sure the target branchmap also contains the one in the base
2503 # make sure the target branchmap also contains the one in the base
2497 targetrevs = list(set(baserevs) | set(targetrevs))
2504 targetrevs = list(set(baserevs) | set(targetrevs))
2498 targetrevs.sort()
2505 targetrevs.sort()
2499
2506
2500 cl = repo.changelog
2507 cl = repo.changelog
2501 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2508 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2502 allbaserevs.sort()
2509 allbaserevs.sort()
2503 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2510 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2504
2511
2505 newrevs = list(alltargetrevs.difference(allbaserevs))
2512 newrevs = list(alltargetrevs.difference(allbaserevs))
2506 newrevs.sort()
2513 newrevs.sort()
2507
2514
2508 allrevs = frozenset(unfi.changelog.revs())
2515 allrevs = frozenset(unfi.changelog.revs())
2509 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2516 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2510 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2517 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2511
2518
2512 def basefilter(repo, visibilityexceptions=None):
2519 def basefilter(repo, visibilityexceptions=None):
2513 return basefilterrevs
2520 return basefilterrevs
2514
2521
2515 def targetfilter(repo, visibilityexceptions=None):
2522 def targetfilter(repo, visibilityexceptions=None):
2516 return targetfilterrevs
2523 return targetfilterrevs
2517
2524
2518 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2525 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2519 ui.status(msg % (len(allbaserevs), len(newrevs)))
2526 ui.status(msg % (len(allbaserevs), len(newrevs)))
2520 if targetfilterrevs:
2527 if targetfilterrevs:
2521 msg = b'(%d revisions still filtered)\n'
2528 msg = b'(%d revisions still filtered)\n'
2522 ui.status(msg % len(targetfilterrevs))
2529 ui.status(msg % len(targetfilterrevs))
2523
2530
2524 try:
2531 try:
2525 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2532 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2526 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2533 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2527
2534
2528 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2535 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2529 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2536 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2530
2537
2531 # try to find an existing branchmap to reuse
2538 # try to find an existing branchmap to reuse
2532 subsettable = getbranchmapsubsettable()
2539 subsettable = getbranchmapsubsettable()
2533 candidatefilter = subsettable.get(None)
2540 candidatefilter = subsettable.get(None)
2534 while candidatefilter is not None:
2541 while candidatefilter is not None:
2535 candidatebm = repo.filtered(candidatefilter).branchmap()
2542 candidatebm = repo.filtered(candidatefilter).branchmap()
2536 if candidatebm.validfor(baserepo):
2543 if candidatebm.validfor(baserepo):
2537 filtered = repoview.filterrevs(repo, candidatefilter)
2544 filtered = repoview.filterrevs(repo, candidatefilter)
2538 missing = [r for r in allbaserevs if r in filtered]
2545 missing = [r for r in allbaserevs if r in filtered]
2539 base = candidatebm.copy()
2546 base = candidatebm.copy()
2540 base.update(baserepo, missing)
2547 base.update(baserepo, missing)
2541 break
2548 break
2542 candidatefilter = subsettable.get(candidatefilter)
2549 candidatefilter = subsettable.get(candidatefilter)
2543 else:
2550 else:
2544 # no suitable subset was found
2551 # no suitable subset was found
2545 base = branchmap.branchcache()
2552 base = branchmap.branchcache()
2546 base.update(baserepo, allbaserevs)
2553 base.update(baserepo, allbaserevs)
2547
2554
2548 def setup():
2555 def setup():
2549 x[0] = base.copy()
2556 x[0] = base.copy()
2550 if clearcaches:
2557 if clearcaches:
2551 unfi._revbranchcache = None
2558 unfi._revbranchcache = None
2552 clearchangelog(repo)
2559 clearchangelog(repo)
2553
2560
2554 def bench():
2561 def bench():
2555 x[0].update(targetrepo, newrevs)
2562 x[0].update(targetrepo, newrevs)
2556
2563
2557 timer(bench, setup=setup)
2564 timer(bench, setup=setup)
2558 fm.end()
2565 fm.end()
2559 finally:
2566 finally:
2560 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2567 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2561 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2568 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2562
2569
2563 @command(b'perfbranchmapload', [
2570 @command(b'perfbranchmapload', [
2564 (b'f', b'filter', b'', b'Specify repoview filter'),
2571 (b'f', b'filter', b'', b'Specify repoview filter'),
2565 (b'', b'list', False, b'List branchmap filter caches'),
2572 (b'', b'list', False, b'List branchmap filter caches'),
2566 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2573 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2567
2574
2568 ] + formatteropts)
2575 ] + formatteropts)
2569 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2576 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2570 """benchmark reading the branchmap"""
2577 """benchmark reading the branchmap"""
2571 opts = _byteskwargs(opts)
2578 opts = _byteskwargs(opts)
2572 clearrevlogs = opts[b'clear_revlogs']
2579 clearrevlogs = opts[b'clear_revlogs']
2573
2580
2574 if list:
2581 if list:
2575 for name, kind, st in repo.cachevfs.readdir(stat=True):
2582 for name, kind, st in repo.cachevfs.readdir(stat=True):
2576 if name.startswith(b'branch2'):
2583 if name.startswith(b'branch2'):
2577 filtername = name.partition(b'-')[2] or b'unfiltered'
2584 filtername = name.partition(b'-')[2] or b'unfiltered'
2578 ui.status(b'%s - %s\n'
2585 ui.status(b'%s - %s\n'
2579 % (filtername, util.bytecount(st.st_size)))
2586 % (filtername, util.bytecount(st.st_size)))
2580 return
2587 return
2581 if not filter:
2588 if not filter:
2582 filter = None
2589 filter = None
2583 subsettable = getbranchmapsubsettable()
2590 subsettable = getbranchmapsubsettable()
2584 if filter is None:
2591 if filter is None:
2585 repo = repo.unfiltered()
2592 repo = repo.unfiltered()
2586 else:
2593 else:
2587 repo = repoview.repoview(repo, filter)
2594 repo = repoview.repoview(repo, filter)
2588
2595
2589 repo.branchmap() # make sure we have a relevant, up to date branchmap
2596 repo.branchmap() # make sure we have a relevant, up to date branchmap
2590
2597
2591 try:
2598 try:
2592 fromfile = branchmap.branchcache.fromfile
2599 fromfile = branchmap.branchcache.fromfile
2593 except AttributeError:
2600 except AttributeError:
2594 # older versions
2601 # older versions
2595 fromfile = branchmap.read
2602 fromfile = branchmap.read
2596
2603
2597 currentfilter = filter
2604 currentfilter = filter
2598 # try once without the timer; the filter may not be cached
2605 # try once without the timer; the filter may not be cached
2599 while fromfile(repo) is None:
2606 while fromfile(repo) is None:
2600 currentfilter = subsettable.get(currentfilter)
2607 currentfilter = subsettable.get(currentfilter)
2601 if currentfilter is None:
2608 if currentfilter is None:
2602 raise error.Abort(b'No branchmap cached for %s repo'
2609 raise error.Abort(b'No branchmap cached for %s repo'
2603 % (filter or b'unfiltered'))
2610 % (filter or b'unfiltered'))
2604 repo = repo.filtered(currentfilter)
2611 repo = repo.filtered(currentfilter)
2605 timer, fm = gettimer(ui, opts)
2612 timer, fm = gettimer(ui, opts)
2606 def setup():
2613 def setup():
2607 if clearrevlogs:
2614 if clearrevlogs:
2608 clearchangelog(repo)
2615 clearchangelog(repo)
2609 def bench():
2616 def bench():
2610 fromfile(repo)
2617 fromfile(repo)
2611 timer(bench, setup=setup)
2618 timer(bench, setup=setup)
2612 fm.end()
2619 fm.end()
2613
2620
2614 @command(b'perfloadmarkers')
2621 @command(b'perfloadmarkers')
2615 def perfloadmarkers(ui, repo):
2622 def perfloadmarkers(ui, repo):
2616 """benchmark the time to parse the on-disk markers for a repo
2623 """benchmark the time to parse the on-disk markers for a repo
2617
2624
2618 Result is the number of markers in the repo."""
2625 Result is the number of markers in the repo."""
2619 timer, fm = gettimer(ui)
2626 timer, fm = gettimer(ui)
2620 svfs = getsvfs(repo)
2627 svfs = getsvfs(repo)
2621 timer(lambda: len(obsolete.obsstore(svfs)))
2628 timer(lambda: len(obsolete.obsstore(svfs)))
2622 fm.end()
2629 fm.end()
2623
2630
2624 @command(b'perflrucachedict', formatteropts +
2631 @command(b'perflrucachedict', formatteropts +
2625 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2632 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2626 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2633 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2627 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2634 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2628 (b'', b'size', 4, b'size of cache'),
2635 (b'', b'size', 4, b'size of cache'),
2629 (b'', b'gets', 10000, b'number of key lookups'),
2636 (b'', b'gets', 10000, b'number of key lookups'),
2630 (b'', b'sets', 10000, b'number of key sets'),
2637 (b'', b'sets', 10000, b'number of key sets'),
2631 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2638 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2632 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2639 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2633 norepo=True)
2640 norepo=True)
2634 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2641 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2635 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2642 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2636 opts = _byteskwargs(opts)
2643 opts = _byteskwargs(opts)
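# The benchmarks below exercise util.lrucachedict through plain assignment,
# insert() with a cost, and random mixed get/set workloads. Hypothetical
# example invocation (for illustration only):
#   $ hg perflrucachedict --size 64 --gets 100000 --costlimit 500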
2637
2644
2638 def doinit():
2645 def doinit():
2639 for i in _xrange(10000):
2646 for i in _xrange(10000):
2640 util.lrucachedict(size)
2647 util.lrucachedict(size)
2641
2648
2642 costrange = list(range(mincost, maxcost + 1))
2649 costrange = list(range(mincost, maxcost + 1))
2643
2650
2644 values = []
2651 values = []
2645 for i in _xrange(size):
2652 for i in _xrange(size):
2646 values.append(random.randint(0, _maxint))
2653 values.append(random.randint(0, _maxint))
2647
2654
2648 # Get mode fills the cache and tests raw lookup performance with no
2655 # Get mode fills the cache and tests raw lookup performance with no
2649 # eviction.
2656 # eviction.
2650 getseq = []
2657 getseq = []
2651 for i in _xrange(gets):
2658 for i in _xrange(gets):
2652 getseq.append(random.choice(values))
2659 getseq.append(random.choice(values))
2653
2660
2654 def dogets():
2661 def dogets():
2655 d = util.lrucachedict(size)
2662 d = util.lrucachedict(size)
2656 for v in values:
2663 for v in values:
2657 d[v] = v
2664 d[v] = v
2658 for key in getseq:
2665 for key in getseq:
2659 value = d[key]
2666 value = d[key]
2660 value # silence pyflakes warning
2667 value # silence pyflakes warning
2661
2668
2662 def dogetscost():
2669 def dogetscost():
2663 d = util.lrucachedict(size, maxcost=costlimit)
2670 d = util.lrucachedict(size, maxcost=costlimit)
2664 for i, v in enumerate(values):
2671 for i, v in enumerate(values):
2665 d.insert(v, v, cost=costs[i])
2672 d.insert(v, v, cost=costs[i])
2666 for key in getseq:
2673 for key in getseq:
2667 try:
2674 try:
2668 value = d[key]
2675 value = d[key]
2669 value # silence pyflakes warning
2676 value # silence pyflakes warning
2670 except KeyError:
2677 except KeyError:
2671 pass
2678 pass
2672
2679
2673 # Set mode tests insertion speed with cache eviction.
2680 # Set mode tests insertion speed with cache eviction.
2674 setseq = []
2681 setseq = []
2675 costs = []
2682 costs = []
2676 for i in _xrange(sets):
2683 for i in _xrange(sets):
2677 setseq.append(random.randint(0, _maxint))
2684 setseq.append(random.randint(0, _maxint))
2678 costs.append(random.choice(costrange))
2685 costs.append(random.choice(costrange))
2679
2686
2680 def doinserts():
2687 def doinserts():
2681 d = util.lrucachedict(size)
2688 d = util.lrucachedict(size)
2682 for v in setseq:
2689 for v in setseq:
2683 d.insert(v, v)
2690 d.insert(v, v)
2684
2691
2685 def doinsertscost():
2692 def doinsertscost():
2686 d = util.lrucachedict(size, maxcost=costlimit)
2693 d = util.lrucachedict(size, maxcost=costlimit)
2687 for i, v in enumerate(setseq):
2694 for i, v in enumerate(setseq):
2688 d.insert(v, v, cost=costs[i])
2695 d.insert(v, v, cost=costs[i])
2689
2696
2690 def dosets():
2697 def dosets():
2691 d = util.lrucachedict(size)
2698 d = util.lrucachedict(size)
2692 for v in setseq:
2699 for v in setseq:
2693 d[v] = v
2700 d[v] = v
2694
2701
2695 # Mixed mode randomly performs gets and sets with eviction.
2702 # Mixed mode randomly performs gets and sets with eviction.
2696 mixedops = []
2703 mixedops = []
2697 for i in _xrange(mixed):
2704 for i in _xrange(mixed):
2698 r = random.randint(0, 100)
2705 r = random.randint(0, 100)
2699 if r < mixedgetfreq:
2706 if r < mixedgetfreq:
2700 op = 0
2707 op = 0
2701 else:
2708 else:
2702 op = 1
2709 op = 1
2703
2710
2704 mixedops.append((op,
2711 mixedops.append((op,
2705 random.randint(0, size * 2),
2712 random.randint(0, size * 2),
2706 random.choice(costrange)))
2713 random.choice(costrange)))
2707
2714
2708 def domixed():
2715 def domixed():
2709 d = util.lrucachedict(size)
2716 d = util.lrucachedict(size)
2710
2717
2711 for op, v, cost in mixedops:
2718 for op, v, cost in mixedops:
2712 if op == 0:
2719 if op == 0:
2713 try:
2720 try:
2714 d[v]
2721 d[v]
2715 except KeyError:
2722 except KeyError:
2716 pass
2723 pass
2717 else:
2724 else:
2718 d[v] = v
2725 d[v] = v
2719
2726
2720 def domixedcost():
2727 def domixedcost():
2721 d = util.lrucachedict(size, maxcost=costlimit)
2728 d = util.lrucachedict(size, maxcost=costlimit)
2722
2729
2723 for op, v, cost in mixedops:
2730 for op, v, cost in mixedops:
2724 if op == 0:
2731 if op == 0:
2725 try:
2732 try:
2726 d[v]
2733 d[v]
2727 except KeyError:
2734 except KeyError:
2728 pass
2735 pass
2729 else:
2736 else:
2730 d.insert(v, v, cost=cost)
2737 d.insert(v, v, cost=cost)
2731
2738
2732 benches = [
2739 benches = [
2733 (doinit, b'init'),
2740 (doinit, b'init'),
2734 ]
2741 ]
2735
2742
2736 if costlimit:
2743 if costlimit:
2737 benches.extend([
2744 benches.extend([
2738 (dogetscost, b'gets w/ cost limit'),
2745 (dogetscost, b'gets w/ cost limit'),
2739 (doinsertscost, b'inserts w/ cost limit'),
2746 (doinsertscost, b'inserts w/ cost limit'),
2740 (domixedcost, b'mixed w/ cost limit'),
2747 (domixedcost, b'mixed w/ cost limit'),
2741 ])
2748 ])
2742 else:
2749 else:
2743 benches.extend([
2750 benches.extend([
2744 (dogets, b'gets'),
2751 (dogets, b'gets'),
2745 (doinserts, b'inserts'),
2752 (doinserts, b'inserts'),
2746 (dosets, b'sets'),
2753 (dosets, b'sets'),
2747 (domixed, b'mixed')
2754 (domixed, b'mixed')
2748 ])
2755 ])
2749
2756
2750 for fn, title in benches:
2757 for fn, title in benches:
2751 timer, fm = gettimer(ui, opts)
2758 timer, fm = gettimer(ui, opts)
2752 timer(fn, title=title)
2759 timer(fn, title=title)
2753 fm.end()
2760 fm.end()
2754
2761
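A minimal standalone sketch of the cache behaviour the modes above exercise, using only the util.lrucachedict calls that already appear in this function (capacity-based eviction via __setitem__, cost-based eviction via insert(..., cost=...)); the sizes and costs below are illustrative only:

from mercurial import util

d = util.lrucachedict(4)                  # keeps at most 4 entries
for v in range(8):
    d[v] = v                              # oldest entries are evicted as newer ones arrive

dc = util.lrucachedict(4, maxcost=10)
for v in range(8):
    dc.insert(v, v, cost=3)               # entries are also evicted once the total cost exceeds 10
try:
    dc[0]                                 # early keys may already be gone
except KeyError:
    pass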
2755 @command(b'perfwrite', formatteropts)
2762 @command(b'perfwrite', formatteropts)
2756 def perfwrite(ui, repo, **opts):
2763 def perfwrite(ui, repo, **opts):
2757 """microbenchmark ui.write
2764 """microbenchmark ui.write
2758 """
2765 """
2759 opts = _byteskwargs(opts)
2766 opts = _byteskwargs(opts)
2760
2767
2761 timer, fm = gettimer(ui, opts)
2768 timer, fm = gettimer(ui, opts)
2762 def write():
2769 def write():
2763 for i in range(100000):
2770 for i in range(100000):
2764 ui.write((b'Testing write performance\n'))
2771 ui.write((b'Testing write performance\n'))
2765 timer(write)
2772 timer(write)
2766 fm.end()
2773 fm.end()
2767
2774
2768 def uisetup(ui):
2775 def uisetup(ui):
2769 if (util.safehasattr(cmdutil, b'openrevlog') and
2776 if (util.safehasattr(cmdutil, b'openrevlog') and
2770 not util.safehasattr(commands, b'debugrevlogopts')):
2777 not util.safehasattr(commands, b'debugrevlogopts')):
2771 # for "historical portability":
2778 # for "historical portability":
2772 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2779 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2773 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2780 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2774 # openrevlog() should cause failure, because it has been
2781 # openrevlog() should cause failure, because it has been
2775 # available since 3.5 (or 49c583ca48c4).
2782 # available since 3.5 (or 49c583ca48c4).
2776 def openrevlog(orig, repo, cmd, file_, opts):
2783 def openrevlog(orig, repo, cmd, file_, opts):
2777 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2784 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2778 raise error.Abort(b"This version doesn't support --dir option",
2785 raise error.Abort(b"This version doesn't support --dir option",
2779 hint=b"use 3.5 or later")
2786 hint=b"use 3.5 or later")
2780 return orig(repo, cmd, file_, opts)
2787 return orig(repo, cmd, file_, opts)
2781 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2788 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2782
2789
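For readers unfamiliar with the wrapping call used just above, the same extensions.wrapfunction pattern is shown here in isolation; the wrapped target (os.path.basename) is purely illustrative and has nothing to do with perf.py:

import os.path

from mercurial import extensions

def wrapper(orig, path):
    # tweak the argument, then delegate to the original function
    return orig(path.rstrip('/'))

extensions.wrapfunction(os.path, 'basename', wrapper)
assert os.path.basename('/tmp/repo/') == 'repo'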
2783 @command(b'perfprogress', formatteropts + [
2790 @command(b'perfprogress', formatteropts + [
2784 (b'', b'topic', b'topic', b'topic for progress messages'),
2791 (b'', b'topic', b'topic', b'topic for progress messages'),
2785 (b'c', b'total', 1000000, b'total value we are progressing to'),
2792 (b'c', b'total', 1000000, b'total value we are progressing to'),
2786 ], norepo=True)
2793 ], norepo=True)
2787 def perfprogress(ui, topic=None, total=None, **opts):
2794 def perfprogress(ui, topic=None, total=None, **opts):
2788 """printing of progress bars"""
2795 """printing of progress bars"""
2789 opts = _byteskwargs(opts)
2796 opts = _byteskwargs(opts)
2790
2797
2791 timer, fm = gettimer(ui, opts)
2798 timer, fm = gettimer(ui, opts)
2792
2799
2793 def doprogress():
2800 def doprogress():
2794 with ui.makeprogress(topic, total=total) as progress:
2801 with ui.makeprogress(topic, total=total) as progress:
2795 for i in pycompat.xrange(total):
2802 for i in pycompat.xrange(total):
2796 progress.increment()
2803 progress.increment()
2797
2804
2798 timer(doprogress)
2805 timer(doprogress)
2799 fm.end()
2806 fm.end()
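Every perf command in this file follows the same harness shape seen in perfwrite and perfprogress above. A hedged sketch of that skeleton, relying on helpers defined earlier in perf.py (command, formatteropts, _byteskwargs, gettimer); 'perfnoop' is a hypothetical name used only to show the pattern:

@command(b'perfnoop', formatteropts)
def perfnoop(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        pass                      # the operation to be measured goes here
    timer(d)
    fm.end()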
@@ -1,320 +1,320 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perf=$CONTRIBDIR/perf.py
35 > perf=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help -e perf
41 $ hg help -e perf
42 perf extension - helper extension to measure performance
42 perf extension - helper extension to measure performance
43
43
44 Configurations
44 Configurations
45 ==============
45 ==============
46
46
47 "perf"
47 "perf"
48 ------
48 ------
49
49
50 "all-timing"
50 "all-timing"
51 When set, additional statistic will be reported for each benchmark: best,
51 When set, additional statistic will be reported for each benchmark: best,
52 worst, median average. If not set only the best timing is reported
52 worst, median average. If not set only the best timing is reported
53 (default: off).
53 (default: off).
54
54
55 "presleep"
55 "presleep"
56 number of second to wait before any group of run (default: 1)
56 number of second to wait before any group of run (default: 1)
57
57
58 "stub"
58 "stub"
59 When set, benchmark will only be run once, useful for testing (default:
59 When set, benchmark will only be run once, useful for testing (default:
60 off)
60 off)
61
61
62 list of commands:
62 list of commands:
63
63
64 perfaddremove
64 perfaddremove
65 (no help text available)
65 (no help text available)
66 perfancestors
66 perfancestors
67 (no help text available)
67 (no help text available)
68 perfancestorset
68 perfancestorset
69 (no help text available)
69 (no help text available)
70 perfannotate (no help text available)
70 perfannotate (no help text available)
71 perfbdiff benchmark a bdiff between revisions
71 perfbdiff benchmark a bdiff between revisions
72 perfbookmarks
72 perfbookmarks
73 benchmark parsing bookmarks from disk to memory
73 benchmark parsing bookmarks from disk to memory
74 perfbranchmap
74 perfbranchmap
75 benchmark the update of a branchmap
75 benchmark the update of a branchmap
76 perfbranchmapload
76 perfbranchmapload
77 benchmark reading the branchmap
77 benchmark reading the branchmap
78 perfbranchmapupdate
78 perfbranchmapupdate
79 benchmark branchmap update from for <base> revs to <target>
79 benchmark branchmap update from for <base> revs to <target>
80 revs
80 revs
81 perfbundleread
81 perfbundleread
82 Benchmark reading of bundle files.
82 Benchmark reading of bundle files.
83 perfcca (no help text available)
83 perfcca (no help text available)
84 perfchangegroupchangelog
84 perfchangegroupchangelog
85 Benchmark producing a changelog group for a changegroup.
85 Benchmark producing a changelog group for a changegroup.
86 perfchangeset
86 perfchangeset
87 (no help text available)
87 (no help text available)
88 perfctxfiles (no help text available)
88 perfctxfiles (no help text available)
89 perfdiffwd Profile diff of working directory changes
89 perfdiffwd Profile diff of working directory changes
90 perfdirfoldmap
90 perfdirfoldmap
91 (no help text available)
91 (no help text available)
92 perfdirs (no help text available)
92 perfdirs (no help text available)
93 perfdirstate (no help text available)
93 perfdirstate (no help text available)
94 perfdirstatedirs
94 perfdirstatedirs
95 (no help text available)
95 (no help text available)
96 perfdirstatefoldmap
96 perfdirstatefoldmap
97 (no help text available)
97 (no help text available)
98 perfdirstatewrite
98 perfdirstatewrite
99 (no help text available)
99 (no help text available)
100 perfdiscovery
100 perfdiscovery
101 benchmark discovery between local repo and the peer at given
101 benchmark discovery between local repo and the peer at given
102 path
102 path
103 perffncacheencode
103 perffncacheencode
104 (no help text available)
104 (no help text available)
105 perffncacheload
105 perffncacheload
106 (no help text available)
106 (no help text available)
107 perffncachewrite
107 perffncachewrite
108 (no help text available)
108 (no help text available)
109 perfheads benchmark the computation of a changelog heads
109 perfheads benchmark the computation of a changelog heads
110 perfhelper-pathcopies
110 perfhelper-pathcopies
111 find statistic about potential parameters for the
111 find statistic about potential parameters for the
112 'perftracecopies'
112 'perftracecopies'
113 perfignore benchmark operation related to computing ignore
113 perfignore benchmark operation related to computing ignore
114 perfindex benchmark index creation time followed by a lookup
114 perfindex benchmark index creation time followed by a lookup
115 perflinelogedits
115 perflinelogedits
116 (no help text available)
116 (no help text available)
117 perfloadmarkers
117 perfloadmarkers
118 benchmark the time to parse the on-disk markers for a repo
118 benchmark the time to parse the on-disk markers for a repo
119 perflog (no help text available)
119 perflog (no help text available)
120 perflookup (no help text available)
120 perflookup (no help text available)
121 perflrucachedict
121 perflrucachedict
122 (no help text available)
122 (no help text available)
123 perfmanifest benchmark the time to read a manifest from disk and return a
123 perfmanifest benchmark the time to read a manifest from disk and return a
124 usable
124 usable
125 perfmergecalculate
125 perfmergecalculate
126 (no help text available)
126 (no help text available)
127 perfmoonwalk benchmark walking the changelog backwards
127 perfmoonwalk benchmark walking the changelog backwards
128 perfnodelookup
128 perfnodelookup
129 (no help text available)
129 (no help text available)
130 perfnodemap benchmark the time necessary to look up revision from a cold
130 perfnodemap benchmark the time necessary to look up revision from a cold
131 nodemap
131 nodemap
132 perfparents (no help text available)
132 perfparents benchmark the time necessary to fetch one changeset's parents.
133 perfpathcopies
133 perfpathcopies
134 benchmark the copy tracing logic
134 benchmark the copy tracing logic
135 perfphases benchmark phasesets computation
135 perfphases benchmark phasesets computation
136 perfphasesremote
136 perfphasesremote
137 benchmark time needed to analyse phases of the remote server
137 benchmark time needed to analyse phases of the remote server
138 perfprogress printing of progress bars
138 perfprogress printing of progress bars
139 perfrawfiles (no help text available)
139 perfrawfiles (no help text available)
140 perfrevlogchunks
140 perfrevlogchunks
141 Benchmark operations on revlog chunks.
141 Benchmark operations on revlog chunks.
142 perfrevlogindex
142 perfrevlogindex
143 Benchmark operations against a revlog index.
143 Benchmark operations against a revlog index.
144 perfrevlogrevision
144 perfrevlogrevision
145 Benchmark obtaining a revlog revision.
145 Benchmark obtaining a revlog revision.
146 perfrevlogrevisions
146 perfrevlogrevisions
147 Benchmark reading a series of revisions from a revlog.
147 Benchmark reading a series of revisions from a revlog.
148 perfrevlogwrite
148 perfrevlogwrite
149 Benchmark writing a series of revisions to a revlog.
149 Benchmark writing a series of revisions to a revlog.
150 perfrevrange (no help text available)
150 perfrevrange (no help text available)
151 perfrevset benchmark the execution time of a revset
151 perfrevset benchmark the execution time of a revset
152 perfstartup (no help text available)
152 perfstartup (no help text available)
153 perfstatus (no help text available)
153 perfstatus (no help text available)
154 perftags (no help text available)
154 perftags (no help text available)
155 perftemplating
155 perftemplating
156 test the rendering time of a given template
156 test the rendering time of a given template
157 perfunidiff benchmark a unified diff between revisions
157 perfunidiff benchmark a unified diff between revisions
158 perfvolatilesets
158 perfvolatilesets
159 benchmark the computation of various volatile set
159 benchmark the computation of various volatile set
160 perfwalk (no help text available)
160 perfwalk (no help text available)
161 perfwrite microbenchmark ui.write
161 perfwrite microbenchmark ui.write
162
162
163 (use 'hg help -v perf' to show built-in aliases and global options)
163 (use 'hg help -v perf' to show built-in aliases and global options)
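The perfparents entry in the help listing above reflects the docstring this commit adds. As a rough illustration of the operation it times (a simplified sketch, not the exact loop in perf.py; the real command reads the revision count from the perf.parentscount option set in the hgrc block above):

# resolve the first few changesets and ask the changelog for each
# one's parents, going through the node identifiers
cl = repo.changelog
nodes = [cl.node(r) for r in range(min(1000, len(cl)))]
for n in nodes:
    cl.parents(n)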
164 $ hg perfaddremove
164 $ hg perfaddremove
165 $ hg perfancestors
165 $ hg perfancestors
166 $ hg perfancestorset 2
166 $ hg perfancestorset 2
167 $ hg perfannotate a
167 $ hg perfannotate a
168 $ hg perfbdiff -c 1
168 $ hg perfbdiff -c 1
169 $ hg perfbdiff --alldata 1
169 $ hg perfbdiff --alldata 1
170 $ hg perfunidiff -c 1
170 $ hg perfunidiff -c 1
171 $ hg perfunidiff --alldata 1
171 $ hg perfunidiff --alldata 1
172 $ hg perfbookmarks
172 $ hg perfbookmarks
173 $ hg perfbranchmap
173 $ hg perfbranchmap
174 $ hg perfbranchmapload
174 $ hg perfbranchmapload
175 $ hg perfbranchmapupdate --base "not tip" --target "tip"
175 $ hg perfbranchmapupdate --base "not tip" --target "tip"
176 benchmark of branchmap with 3 revisions with 1 new ones
176 benchmark of branchmap with 3 revisions with 1 new ones
177 $ hg perfcca
177 $ hg perfcca
178 $ hg perfchangegroupchangelog
178 $ hg perfchangegroupchangelog
179 $ hg perfchangegroupchangelog --cgversion 01
179 $ hg perfchangegroupchangelog --cgversion 01
180 $ hg perfchangeset 2
180 $ hg perfchangeset 2
181 $ hg perfctxfiles 2
181 $ hg perfctxfiles 2
182 $ hg perfdiffwd
182 $ hg perfdiffwd
183 $ hg perfdirfoldmap
183 $ hg perfdirfoldmap
184 $ hg perfdirs
184 $ hg perfdirs
185 $ hg perfdirstate
185 $ hg perfdirstate
186 $ hg perfdirstatedirs
186 $ hg perfdirstatedirs
187 $ hg perfdirstatefoldmap
187 $ hg perfdirstatefoldmap
188 $ hg perfdirstatewrite
188 $ hg perfdirstatewrite
189 #if repofncache
189 #if repofncache
190 $ hg perffncacheencode
190 $ hg perffncacheencode
191 $ hg perffncacheload
191 $ hg perffncacheload
192 $ hg debugrebuildfncache
192 $ hg debugrebuildfncache
193 fncache already up to date
193 fncache already up to date
194 $ hg perffncachewrite
194 $ hg perffncachewrite
195 $ hg debugrebuildfncache
195 $ hg debugrebuildfncache
196 fncache already up to date
196 fncache already up to date
197 #endif
197 #endif
198 $ hg perfheads
198 $ hg perfheads
199 $ hg perfignore
199 $ hg perfignore
200 $ hg perfindex
200 $ hg perfindex
201 $ hg perflinelogedits -n 1
201 $ hg perflinelogedits -n 1
202 $ hg perfloadmarkers
202 $ hg perfloadmarkers
203 $ hg perflog
203 $ hg perflog
204 $ hg perflookup 2
204 $ hg perflookup 2
205 $ hg perflrucache
205 $ hg perflrucache
206 $ hg perfmanifest 2
206 $ hg perfmanifest 2
207 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
207 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
208 $ hg perfmanifest -m 44fe2c8352bb
208 $ hg perfmanifest -m 44fe2c8352bb
209 abort: manifest revision must be integer or full node
209 abort: manifest revision must be integer or full node
210 [255]
210 [255]
211 $ hg perfmergecalculate -r 3
211 $ hg perfmergecalculate -r 3
212 $ hg perfmoonwalk
212 $ hg perfmoonwalk
213 $ hg perfnodelookup 2
213 $ hg perfnodelookup 2
214 $ hg perfpathcopies 1 2
214 $ hg perfpathcopies 1 2
215 $ hg perfprogress --total 1000
215 $ hg perfprogress --total 1000
216 $ hg perfrawfiles 2
216 $ hg perfrawfiles 2
217 $ hg perfrevlogindex -c
217 $ hg perfrevlogindex -c
218 #if reporevlogstore
218 #if reporevlogstore
219 $ hg perfrevlogrevisions .hg/store/data/a.i
219 $ hg perfrevlogrevisions .hg/store/data/a.i
220 #endif
220 #endif
221 $ hg perfrevlogrevision -m 0
221 $ hg perfrevlogrevision -m 0
222 $ hg perfrevlogchunks -c
222 $ hg perfrevlogchunks -c
223 $ hg perfrevrange
223 $ hg perfrevrange
224 $ hg perfrevset 'all()'
224 $ hg perfrevset 'all()'
225 $ hg perfstartup
225 $ hg perfstartup
226 $ hg perfstatus
226 $ hg perfstatus
227 $ hg perftags
227 $ hg perftags
228 $ hg perftemplating
228 $ hg perftemplating
229 $ hg perfvolatilesets
229 $ hg perfvolatilesets
230 $ hg perfwalk
230 $ hg perfwalk
231 $ hg perfparents
231 $ hg perfparents
232 $ hg perfdiscovery -q .
232 $ hg perfdiscovery -q .
233
233
234 test actual output
234 test actual output
235 ------------------
235 ------------------
236
236
237 normal output:
237 normal output:
238
238
239 $ hg perfheads --config perf.stub=no
239 $ hg perfheads --config perf.stub=no
240 ! wall * comb * user * sys * (best of *) (glob)
240 ! wall * comb * user * sys * (best of *) (glob)
241
241
242 detailed output:
242 detailed output:
243
243
244 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
244 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
245 ! wall * comb * user * sys * (best of *) (glob)
245 ! wall * comb * user * sys * (best of *) (glob)
246 ! wall * comb * user * sys * (max of *) (glob)
246 ! wall * comb * user * sys * (max of *) (glob)
247 ! wall * comb * user * sys * (avg of *) (glob)
247 ! wall * comb * user * sys * (avg of *) (glob)
248 ! wall * comb * user * sys * (median of *) (glob)
248 ! wall * comb * user * sys * (median of *) (glob)
249
249
250 test json output
250 test json output
251 ----------------
251 ----------------
252
252
253 normal output:
253 normal output:
254
254
255 $ hg perfheads --template json --config perf.stub=no
255 $ hg perfheads --template json --config perf.stub=no
256 [
256 [
257 {
257 {
258 "comb": *, (glob)
258 "comb": *, (glob)
259 "count": *, (glob)
259 "count": *, (glob)
260 "sys": *, (glob)
260 "sys": *, (glob)
261 "user": *, (glob)
261 "user": *, (glob)
262 "wall": * (glob)
262 "wall": * (glob)
263 }
263 }
264 ]
264 ]
265
265
266 detailed output:
266 detailed output:
267
267
268 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
268 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
269 [
269 [
270 {
270 {
271 "avg.comb": *, (glob)
271 "avg.comb": *, (glob)
272 "avg.count": *, (glob)
272 "avg.count": *, (glob)
273 "avg.sys": *, (glob)
273 "avg.sys": *, (glob)
274 "avg.user": *, (glob)
274 "avg.user": *, (glob)
275 "avg.wall": *, (glob)
275 "avg.wall": *, (glob)
276 "comb": *, (glob)
276 "comb": *, (glob)
277 "count": *, (glob)
277 "count": *, (glob)
278 "max.comb": *, (glob)
278 "max.comb": *, (glob)
279 "max.count": *, (glob)
279 "max.count": *, (glob)
280 "max.sys": *, (glob)
280 "max.sys": *, (glob)
281 "max.user": *, (glob)
281 "max.user": *, (glob)
282 "max.wall": *, (glob)
282 "max.wall": *, (glob)
283 "median.comb": *, (glob)
283 "median.comb": *, (glob)
284 "median.count": *, (glob)
284 "median.count": *, (glob)
285 "median.sys": *, (glob)
285 "median.sys": *, (glob)
286 "median.user": *, (glob)
286 "median.user": *, (glob)
287 "median.wall": *, (glob)
287 "median.wall": *, (glob)
288 "sys": *, (glob)
288 "sys": *, (glob)
289 "user": *, (glob)
289 "user": *, (glob)
290 "wall": * (glob)
290 "wall": * (glob)
291 }
291 }
292 ]
292 ]
293
293
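Because the JSON template emits the fields shown above, the results can also be consumed programmatically. A small sketch, assuming hg is on PATH and the perf extension is enabled as configured earlier; the field choice is illustrative:

import json
import subprocess

out = subprocess.check_output(
    ['hg', 'perfheads', '--template', 'json', '--config', 'perf.stub=no'])
data = json.loads(out)
print(data[0]['wall'])   # best wall-clock time, per the schema shown above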
294 Check perf.py for historical portability
294 Check perf.py for historical portability
295 ----------------------------------------
295 ----------------------------------------
296
296
297 $ cd "$TESTDIR/.."
297 $ cd "$TESTDIR/.."
298
298
299 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
299 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
300 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
300 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
301 > "$TESTDIR"/check-perf-code.py contrib/perf.py
301 > "$TESTDIR"/check-perf-code.py contrib/perf.py
302 contrib/perf.py:\d+: (re)
302 contrib/perf.py:\d+: (re)
303 > from mercurial import (
303 > from mercurial import (
304 import newer module separately in try clause for early Mercurial
304 import newer module separately in try clause for early Mercurial
305 contrib/perf.py:\d+: (re)
305 contrib/perf.py:\d+: (re)
306 > from mercurial import (
306 > from mercurial import (
307 import newer module separately in try clause for early Mercurial
307 import newer module separately in try clause for early Mercurial
308 contrib/perf.py:\d+: (re)
308 contrib/perf.py:\d+: (re)
309 > origindexpath = orig.opener.join(orig.indexfile)
309 > origindexpath = orig.opener.join(orig.indexfile)
310 use getvfs()/getsvfs() for early Mercurial
310 use getvfs()/getsvfs() for early Mercurial
311 contrib/perf.py:\d+: (re)
311 contrib/perf.py:\d+: (re)
312 > origdatapath = orig.opener.join(orig.datafile)
312 > origdatapath = orig.opener.join(orig.datafile)
313 use getvfs()/getsvfs() for early Mercurial
313 use getvfs()/getsvfs() for early Mercurial
314 contrib/perf.py:\d+: (re)
314 contrib/perf.py:\d+: (re)
315 > vfs = vfsmod.vfs(tmpdir)
315 > vfs = vfsmod.vfs(tmpdir)
316 use getvfs()/getsvfs() for early Mercurial
316 use getvfs()/getsvfs() for early Mercurial
317 contrib/perf.py:\d+: (re)
317 contrib/perf.py:\d+: (re)
318 > vfs.options = getattr(orig.opener, 'options', None)
318 > vfs.options = getattr(orig.opener, 'options', None)
319 use getvfs()/getsvfs() for early Mercurial
319 use getvfs()/getsvfs() for early Mercurial
320 [1]
320 [1]