##// END OF EJS Templates
perf: add a docstring to `perfpathcopies`...
Boris Feld -
r40770:dc3ab5e5 default
parent child Browse files
Show More
@@ -1,2474 +1,2475 b''
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 # "historical portability" policy of perf.py:
4 # "historical portability" policy of perf.py:
5 #
5 #
6 # We have to do:
6 # We have to do:
7 # - make perf.py "loadable" with as wide Mercurial version as possible
7 # - make perf.py "loadable" with as wide Mercurial version as possible
8 # This doesn't mean that perf commands work correctly with that Mercurial.
8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 # - make historical perf command work correctly with as wide Mercurial
10 # - make historical perf command work correctly with as wide Mercurial
11 # version as possible
11 # version as possible
12 #
12 #
13 # We have to do, if possible with reasonable cost:
13 # We have to do, if possible with reasonable cost:
14 # - make recent perf command for historical feature work correctly
14 # - make recent perf command for historical feature work correctly
15 # with early Mercurial
15 # with early Mercurial
16 #
16 #
17 # We don't have to do:
17 # We don't have to do:
18 # - make perf command for recent feature work correctly with early
18 # - make perf command for recent feature work correctly with early
19 # Mercurial
19 # Mercurial
20
20
21 from __future__ import absolute_import
21 from __future__ import absolute_import
22 import contextlib
22 import contextlib
23 import functools
23 import functools
24 import gc
24 import gc
25 import os
25 import os
26 import random
26 import random
27 import shutil
27 import shutil
28 import struct
28 import struct
29 import sys
29 import sys
30 import tempfile
30 import tempfile
31 import threading
31 import threading
32 import time
32 import time
33 from mercurial import (
33 from mercurial import (
34 changegroup,
34 changegroup,
35 cmdutil,
35 cmdutil,
36 commands,
36 commands,
37 copies,
37 copies,
38 error,
38 error,
39 extensions,
39 extensions,
40 mdiff,
40 mdiff,
41 merge,
41 merge,
42 revlog,
42 revlog,
43 util,
43 util,
44 )
44 )
45
45
46 # for "historical portability":
46 # for "historical portability":
47 # try to import modules separately (in dict order), and ignore
47 # try to import modules separately (in dict order), and ignore
48 # failure, because these aren't available with early Mercurial
48 # failure, because these aren't available with early Mercurial
49 try:
49 try:
50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
50 from mercurial import branchmap # since 2.5 (or bcee63733aad)
51 except ImportError:
51 except ImportError:
52 pass
52 pass
53 try:
53 try:
54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
54 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
55 except ImportError:
55 except ImportError:
56 pass
56 pass
57 try:
57 try:
58 from mercurial import registrar # since 3.7 (or 37d50250b696)
58 from mercurial import registrar # since 3.7 (or 37d50250b696)
59 dir(registrar) # forcibly load it
59 dir(registrar) # forcibly load it
60 except ImportError:
60 except ImportError:
61 registrar = None
61 registrar = None
62 try:
62 try:
63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
63 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
64 except ImportError:
64 except ImportError:
65 pass
65 pass
66 try:
66 try:
67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
67 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
68 except ImportError:
68 except ImportError:
69 pass
69 pass
70
70
71 def identity(a):
71 def identity(a):
72 return a
72 return a
73
73
74 try:
74 try:
75 from mercurial import pycompat
75 from mercurial import pycompat
76 getargspec = pycompat.getargspec # added to module after 4.5
76 getargspec = pycompat.getargspec # added to module after 4.5
77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
77 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
78 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
79 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
80 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
81 if pycompat.ispy3:
81 if pycompat.ispy3:
82 _maxint = sys.maxsize # per py3 docs for replacing maxint
82 _maxint = sys.maxsize # per py3 docs for replacing maxint
83 else:
83 else:
84 _maxint = sys.maxint
84 _maxint = sys.maxint
85 except (ImportError, AttributeError):
85 except (ImportError, AttributeError):
86 import inspect
86 import inspect
87 getargspec = inspect.getargspec
87 getargspec = inspect.getargspec
88 _byteskwargs = identity
88 _byteskwargs = identity
89 fsencode = identity # no py3 support
89 fsencode = identity # no py3 support
90 _maxint = sys.maxint # no py3 support
90 _maxint = sys.maxint # no py3 support
91 _sysstr = lambda x: x # no py3 support
91 _sysstr = lambda x: x # no py3 support
92 _xrange = xrange
92 _xrange = xrange
93
93
94 try:
94 try:
95 # 4.7+
95 # 4.7+
96 queue = pycompat.queue.Queue
96 queue = pycompat.queue.Queue
97 except (AttributeError, ImportError):
97 except (AttributeError, ImportError):
98 # <4.7.
98 # <4.7.
99 try:
99 try:
100 queue = pycompat.queue
100 queue = pycompat.queue
101 except (AttributeError, ImportError):
101 except (AttributeError, ImportError):
102 queue = util.queue
102 queue = util.queue
103
103
104 try:
104 try:
105 from mercurial import logcmdutil
105 from mercurial import logcmdutil
106 makelogtemplater = logcmdutil.maketemplater
106 makelogtemplater = logcmdutil.maketemplater
107 except (AttributeError, ImportError):
107 except (AttributeError, ImportError):
108 try:
108 try:
109 makelogtemplater = cmdutil.makelogtemplater
109 makelogtemplater = cmdutil.makelogtemplater
110 except (AttributeError, ImportError):
110 except (AttributeError, ImportError):
111 makelogtemplater = None
111 makelogtemplater = None
112
112
113 # for "historical portability":
113 # for "historical portability":
114 # define util.safehasattr forcibly, because util.safehasattr has been
114 # define util.safehasattr forcibly, because util.safehasattr has been
115 # available since 1.9.3 (or 94b200a11cf7)
115 # available since 1.9.3 (or 94b200a11cf7)
116 _undefined = object()
116 _undefined = object()
117 def safehasattr(thing, attr):
117 def safehasattr(thing, attr):
118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
118 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
119 setattr(util, 'safehasattr', safehasattr)
119 setattr(util, 'safehasattr', safehasattr)
120
120
121 # for "historical portability":
121 # for "historical portability":
122 # define util.timer forcibly, because util.timer has been available
122 # define util.timer forcibly, because util.timer has been available
123 # since ae5d60bb70c9
123 # since ae5d60bb70c9
124 if safehasattr(time, 'perf_counter'):
124 if safehasattr(time, 'perf_counter'):
125 util.timer = time.perf_counter
125 util.timer = time.perf_counter
126 elif os.name == b'nt':
126 elif os.name == b'nt':
127 util.timer = time.clock
127 util.timer = time.clock
128 else:
128 else:
129 util.timer = time.time
129 util.timer = time.time
130
130
131 # for "historical portability":
131 # for "historical portability":
132 # use locally defined empty option list, if formatteropts isn't
132 # use locally defined empty option list, if formatteropts isn't
133 # available, because commands.formatteropts has been available since
133 # available, because commands.formatteropts has been available since
134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
134 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
135 # available since 2.2 (or ae5f92e154d3)
135 # available since 2.2 (or ae5f92e154d3)
136 formatteropts = getattr(cmdutil, "formatteropts",
136 formatteropts = getattr(cmdutil, "formatteropts",
137 getattr(commands, "formatteropts", []))
137 getattr(commands, "formatteropts", []))
138
138
139 # for "historical portability":
139 # for "historical portability":
140 # use locally defined option list, if debugrevlogopts isn't available,
140 # use locally defined option list, if debugrevlogopts isn't available,
141 # because commands.debugrevlogopts has been available since 3.7 (or
141 # because commands.debugrevlogopts has been available since 3.7 (or
142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
142 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
143 # since 1.9 (or a79fea6b3e77).
143 # since 1.9 (or a79fea6b3e77).
144 revlogopts = getattr(cmdutil, "debugrevlogopts",
144 revlogopts = getattr(cmdutil, "debugrevlogopts",
145 getattr(commands, "debugrevlogopts", [
145 getattr(commands, "debugrevlogopts", [
146 (b'c', b'changelog', False, (b'open changelog')),
146 (b'c', b'changelog', False, (b'open changelog')),
147 (b'm', b'manifest', False, (b'open manifest')),
147 (b'm', b'manifest', False, (b'open manifest')),
148 (b'', b'dir', False, (b'open directory manifest')),
148 (b'', b'dir', False, (b'open directory manifest')),
149 ]))
149 ]))
150
150
151 cmdtable = {}
151 cmdtable = {}
152
152
153 # for "historical portability":
153 # for "historical portability":
154 # define parsealiases locally, because cmdutil.parsealiases has been
154 # define parsealiases locally, because cmdutil.parsealiases has been
155 # available since 1.5 (or 6252852b4332)
155 # available since 1.5 (or 6252852b4332)
156 def parsealiases(cmd):
156 def parsealiases(cmd):
157 return cmd.split(b"|")
157 return cmd.split(b"|")
158
158
159 if safehasattr(registrar, 'command'):
159 if safehasattr(registrar, 'command'):
160 command = registrar.command(cmdtable)
160 command = registrar.command(cmdtable)
161 elif safehasattr(cmdutil, 'command'):
161 elif safehasattr(cmdutil, 'command'):
162 command = cmdutil.command(cmdtable)
162 command = cmdutil.command(cmdtable)
163 if b'norepo' not in getargspec(command).args:
163 if b'norepo' not in getargspec(command).args:
164 # for "historical portability":
164 # for "historical portability":
165 # wrap original cmdutil.command, because "norepo" option has
165 # wrap original cmdutil.command, because "norepo" option has
166 # been available since 3.1 (or 75a96326cecb)
166 # been available since 3.1 (or 75a96326cecb)
167 _command = command
167 _command = command
168 def command(name, options=(), synopsis=None, norepo=False):
168 def command(name, options=(), synopsis=None, norepo=False):
169 if norepo:
169 if norepo:
170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
170 commands.norepo += b' %s' % b' '.join(parsealiases(name))
171 return _command(name, list(options), synopsis)
171 return _command(name, list(options), synopsis)
172 else:
172 else:
173 # for "historical portability":
173 # for "historical portability":
174 # define "@command" annotation locally, because cmdutil.command
174 # define "@command" annotation locally, because cmdutil.command
175 # has been available since 1.9 (or 2daa5179e73f)
175 # has been available since 1.9 (or 2daa5179e73f)
176 def command(name, options=(), synopsis=None, norepo=False):
176 def command(name, options=(), synopsis=None, norepo=False):
177 def decorator(func):
177 def decorator(func):
178 if synopsis:
178 if synopsis:
179 cmdtable[name] = func, list(options), synopsis
179 cmdtable[name] = func, list(options), synopsis
180 else:
180 else:
181 cmdtable[name] = func, list(options)
181 cmdtable[name] = func, list(options)
182 if norepo:
182 if norepo:
183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
183 commands.norepo += b' %s' % b' '.join(parsealiases(name))
184 return func
184 return func
185 return decorator
185 return decorator
186
186
187 try:
187 try:
188 import mercurial.registrar
188 import mercurial.registrar
189 import mercurial.configitems
189 import mercurial.configitems
190 configtable = {}
190 configtable = {}
191 configitem = mercurial.registrar.configitem(configtable)
191 configitem = mercurial.registrar.configitem(configtable)
192 configitem(b'perf', b'presleep',
192 configitem(b'perf', b'presleep',
193 default=mercurial.configitems.dynamicdefault,
193 default=mercurial.configitems.dynamicdefault,
194 )
194 )
195 configitem(b'perf', b'stub',
195 configitem(b'perf', b'stub',
196 default=mercurial.configitems.dynamicdefault,
196 default=mercurial.configitems.dynamicdefault,
197 )
197 )
198 configitem(b'perf', b'parentscount',
198 configitem(b'perf', b'parentscount',
199 default=mercurial.configitems.dynamicdefault,
199 default=mercurial.configitems.dynamicdefault,
200 )
200 )
201 configitem(b'perf', b'all-timing',
201 configitem(b'perf', b'all-timing',
202 default=mercurial.configitems.dynamicdefault,
202 default=mercurial.configitems.dynamicdefault,
203 )
203 )
204 except (ImportError, AttributeError):
204 except (ImportError, AttributeError):
205 pass
205 pass
206
206
207 def getlen(ui):
207 def getlen(ui):
208 if ui.configbool(b"perf", b"stub", False):
208 if ui.configbool(b"perf", b"stub", False):
209 return lambda x: 1
209 return lambda x: 1
210 return len
210 return len
211
211
212 def gettimer(ui, opts=None):
212 def gettimer(ui, opts=None):
213 """return a timer function and formatter: (timer, formatter)
213 """return a timer function and formatter: (timer, formatter)
214
214
215 This function exists to gather the creation of formatter in a single
215 This function exists to gather the creation of formatter in a single
216 place instead of duplicating it in all performance commands."""
216 place instead of duplicating it in all performance commands."""
217
217
218 # enforce an idle period before execution to counteract power management
218 # enforce an idle period before execution to counteract power management
219 # experimental config: perf.presleep
219 # experimental config: perf.presleep
220 time.sleep(getint(ui, b"perf", b"presleep", 1))
220 time.sleep(getint(ui, b"perf", b"presleep", 1))
221
221
222 if opts is None:
222 if opts is None:
223 opts = {}
223 opts = {}
224 # redirect all to stderr unless buffer api is in use
224 # redirect all to stderr unless buffer api is in use
225 if not ui._buffers:
225 if not ui._buffers:
226 ui = ui.copy()
226 ui = ui.copy()
227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
227 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
228 if uifout:
228 if uifout:
229 # for "historical portability":
229 # for "historical portability":
230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
230 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
231 uifout.set(ui.ferr)
231 uifout.set(ui.ferr)
232
232
233 # get a formatter
233 # get a formatter
234 uiformatter = getattr(ui, 'formatter', None)
234 uiformatter = getattr(ui, 'formatter', None)
235 if uiformatter:
235 if uiformatter:
236 fm = uiformatter(b'perf', opts)
236 fm = uiformatter(b'perf', opts)
237 else:
237 else:
238 # for "historical portability":
238 # for "historical portability":
239 # define formatter locally, because ui.formatter has been
239 # define formatter locally, because ui.formatter has been
240 # available since 2.2 (or ae5f92e154d3)
240 # available since 2.2 (or ae5f92e154d3)
241 from mercurial import node
241 from mercurial import node
242 class defaultformatter(object):
242 class defaultformatter(object):
243 """Minimized composition of baseformatter and plainformatter
243 """Minimized composition of baseformatter and plainformatter
244 """
244 """
245 def __init__(self, ui, topic, opts):
245 def __init__(self, ui, topic, opts):
246 self._ui = ui
246 self._ui = ui
247 if ui.debugflag:
247 if ui.debugflag:
248 self.hexfunc = node.hex
248 self.hexfunc = node.hex
249 else:
249 else:
250 self.hexfunc = node.short
250 self.hexfunc = node.short
251 def __nonzero__(self):
251 def __nonzero__(self):
252 return False
252 return False
253 __bool__ = __nonzero__
253 __bool__ = __nonzero__
254 def startitem(self):
254 def startitem(self):
255 pass
255 pass
256 def data(self, **data):
256 def data(self, **data):
257 pass
257 pass
258 def write(self, fields, deftext, *fielddata, **opts):
258 def write(self, fields, deftext, *fielddata, **opts):
259 self._ui.write(deftext % fielddata, **opts)
259 self._ui.write(deftext % fielddata, **opts)
260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
260 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
261 if cond:
261 if cond:
262 self._ui.write(deftext % fielddata, **opts)
262 self._ui.write(deftext % fielddata, **opts)
263 def plain(self, text, **opts):
263 def plain(self, text, **opts):
264 self._ui.write(text, **opts)
264 self._ui.write(text, **opts)
265 def end(self):
265 def end(self):
266 pass
266 pass
267 fm = defaultformatter(ui, b'perf', opts)
267 fm = defaultformatter(ui, b'perf', opts)
268
268
269 # stub function, runs code only once instead of in a loop
269 # stub function, runs code only once instead of in a loop
270 # experimental config: perf.stub
270 # experimental config: perf.stub
271 if ui.configbool(b"perf", b"stub", False):
271 if ui.configbool(b"perf", b"stub", False):
272 return functools.partial(stub_timer, fm), fm
272 return functools.partial(stub_timer, fm), fm
273
273
274 # experimental config: perf.all-timing
274 # experimental config: perf.all-timing
275 displayall = ui.configbool(b"perf", b"all-timing", False)
275 displayall = ui.configbool(b"perf", b"all-timing", False)
276 return functools.partial(_timer, fm, displayall=displayall), fm
276 return functools.partial(_timer, fm, displayall=displayall), fm
277
277
278 def stub_timer(fm, func, setup=None, title=None):
278 def stub_timer(fm, func, setup=None, title=None):
279 if setup is not None:
279 if setup is not None:
280 setup()
280 setup()
281 func()
281 func()
282
282
283 @contextlib.contextmanager
283 @contextlib.contextmanager
284 def timeone():
284 def timeone():
285 r = []
285 r = []
286 ostart = os.times()
286 ostart = os.times()
287 cstart = util.timer()
287 cstart = util.timer()
288 yield r
288 yield r
289 cstop = util.timer()
289 cstop = util.timer()
290 ostop = os.times()
290 ostop = os.times()
291 a, b = ostart, ostop
291 a, b = ostart, ostop
292 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
292 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
293
293
294 def _timer(fm, func, setup=None, title=None, displayall=False):
294 def _timer(fm, func, setup=None, title=None, displayall=False):
295 gc.collect()
295 gc.collect()
296 results = []
296 results = []
297 begin = util.timer()
297 begin = util.timer()
298 count = 0
298 count = 0
299 while True:
299 while True:
300 if setup is not None:
300 if setup is not None:
301 setup()
301 setup()
302 with timeone() as item:
302 with timeone() as item:
303 r = func()
303 r = func()
304 count += 1
304 count += 1
305 results.append(item[0])
305 results.append(item[0])
306 cstop = util.timer()
306 cstop = util.timer()
307 if cstop - begin > 3 and count >= 100:
307 if cstop - begin > 3 and count >= 100:
308 break
308 break
309 if cstop - begin > 10 and count >= 3:
309 if cstop - begin > 10 and count >= 3:
310 break
310 break
311
311
312 formatone(fm, results, title=title, result=r,
312 formatone(fm, results, title=title, result=r,
313 displayall=displayall)
313 displayall=displayall)
314
314
315 def formatone(fm, timings, title=None, result=None, displayall=False):
315 def formatone(fm, timings, title=None, result=None, displayall=False):
316
316
317 count = len(timings)
317 count = len(timings)
318
318
319 fm.startitem()
319 fm.startitem()
320
320
321 if title:
321 if title:
322 fm.write(b'title', b'! %s\n', title)
322 fm.write(b'title', b'! %s\n', title)
323 if result:
323 if result:
324 fm.write(b'result', b'! result: %s\n', result)
324 fm.write(b'result', b'! result: %s\n', result)
325 def display(role, entry):
325 def display(role, entry):
326 prefix = b''
326 prefix = b''
327 if role != b'best':
327 if role != b'best':
328 prefix = b'%s.' % role
328 prefix = b'%s.' % role
329 fm.plain(b'!')
329 fm.plain(b'!')
330 fm.write(prefix + b'wall', b' wall %f', entry[0])
330 fm.write(prefix + b'wall', b' wall %f', entry[0])
331 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
331 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
332 fm.write(prefix + b'user', b' user %f', entry[1])
332 fm.write(prefix + b'user', b' user %f', entry[1])
333 fm.write(prefix + b'sys', b' sys %f', entry[2])
333 fm.write(prefix + b'sys', b' sys %f', entry[2])
334 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
334 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
335 fm.plain(b'\n')
335 fm.plain(b'\n')
336 timings.sort()
336 timings.sort()
337 min_val = timings[0]
337 min_val = timings[0]
338 display(b'best', min_val)
338 display(b'best', min_val)
339 if displayall:
339 if displayall:
340 max_val = timings[-1]
340 max_val = timings[-1]
341 display(b'max', max_val)
341 display(b'max', max_val)
342 avg = tuple([sum(x) / count for x in zip(*timings)])
342 avg = tuple([sum(x) / count for x in zip(*timings)])
343 display(b'avg', avg)
343 display(b'avg', avg)
344 median = timings[len(timings) // 2]
344 median = timings[len(timings) // 2]
345 display(b'median', median)
345 display(b'median', median)
346
346
347 # utilities for historical portability
347 # utilities for historical portability
348
348
349 def getint(ui, section, name, default):
349 def getint(ui, section, name, default):
350 # for "historical portability":
350 # for "historical portability":
351 # ui.configint has been available since 1.9 (or fa2b596db182)
351 # ui.configint has been available since 1.9 (or fa2b596db182)
352 v = ui.config(section, name, None)
352 v = ui.config(section, name, None)
353 if v is None:
353 if v is None:
354 return default
354 return default
355 try:
355 try:
356 return int(v)
356 return int(v)
357 except ValueError:
357 except ValueError:
358 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
358 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
359 % (section, name, v))
359 % (section, name, v))
360
360
361 def safeattrsetter(obj, name, ignoremissing=False):
361 def safeattrsetter(obj, name, ignoremissing=False):
362 """Ensure that 'obj' has 'name' attribute before subsequent setattr
362 """Ensure that 'obj' has 'name' attribute before subsequent setattr
363
363
364 This function is aborted, if 'obj' doesn't have 'name' attribute
364 This function is aborted, if 'obj' doesn't have 'name' attribute
365 at runtime. This avoids overlooking removal of an attribute, which
365 at runtime. This avoids overlooking removal of an attribute, which
366 breaks assumption of performance measurement, in the future.
366 breaks assumption of performance measurement, in the future.
367
367
368 This function returns the object to (1) assign a new value, and
368 This function returns the object to (1) assign a new value, and
369 (2) restore an original value to the attribute.
369 (2) restore an original value to the attribute.
370
370
371 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
371 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
372 abortion, and this function returns None. This is useful to
372 abortion, and this function returns None. This is useful to
373 examine an attribute, which isn't ensured in all Mercurial
373 examine an attribute, which isn't ensured in all Mercurial
374 versions.
374 versions.
375 """
375 """
376 if not util.safehasattr(obj, name):
376 if not util.safehasattr(obj, name):
377 if ignoremissing:
377 if ignoremissing:
378 return None
378 return None
379 raise error.Abort((b"missing attribute %s of %s might break assumption"
379 raise error.Abort((b"missing attribute %s of %s might break assumption"
380 b" of performance measurement") % (name, obj))
380 b" of performance measurement") % (name, obj))
381
381
382 origvalue = getattr(obj, _sysstr(name))
382 origvalue = getattr(obj, _sysstr(name))
383 class attrutil(object):
383 class attrutil(object):
384 def set(self, newvalue):
384 def set(self, newvalue):
385 setattr(obj, _sysstr(name), newvalue)
385 setattr(obj, _sysstr(name), newvalue)
386 def restore(self):
386 def restore(self):
387 setattr(obj, _sysstr(name), origvalue)
387 setattr(obj, _sysstr(name), origvalue)
388
388
389 return attrutil()
389 return attrutil()
390
390
391 # utilities to examine each internal API changes
391 # utilities to examine each internal API changes
392
392
393 def getbranchmapsubsettable():
393 def getbranchmapsubsettable():
394 # for "historical portability":
394 # for "historical portability":
395 # subsettable is defined in:
395 # subsettable is defined in:
396 # - branchmap since 2.9 (or 175c6fd8cacc)
396 # - branchmap since 2.9 (or 175c6fd8cacc)
397 # - repoview since 2.5 (or 59a9f18d4587)
397 # - repoview since 2.5 (or 59a9f18d4587)
398 for mod in (branchmap, repoview):
398 for mod in (branchmap, repoview):
399 subsettable = getattr(mod, 'subsettable', None)
399 subsettable = getattr(mod, 'subsettable', None)
400 if subsettable:
400 if subsettable:
401 return subsettable
401 return subsettable
402
402
403 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
403 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
404 # branchmap and repoview modules exist, but subsettable attribute
404 # branchmap and repoview modules exist, but subsettable attribute
405 # doesn't)
405 # doesn't)
406 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
406 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
407 hint=b"use 2.5 or later")
407 hint=b"use 2.5 or later")
408
408
409 def getsvfs(repo):
409 def getsvfs(repo):
410 """Return appropriate object to access files under .hg/store
410 """Return appropriate object to access files under .hg/store
411 """
411 """
412 # for "historical portability":
412 # for "historical portability":
413 # repo.svfs has been available since 2.3 (or 7034365089bf)
413 # repo.svfs has been available since 2.3 (or 7034365089bf)
414 svfs = getattr(repo, 'svfs', None)
414 svfs = getattr(repo, 'svfs', None)
415 if svfs:
415 if svfs:
416 return svfs
416 return svfs
417 else:
417 else:
418 return getattr(repo, 'sopener')
418 return getattr(repo, 'sopener')
419
419
420 def getvfs(repo):
420 def getvfs(repo):
421 """Return appropriate object to access files under .hg
421 """Return appropriate object to access files under .hg
422 """
422 """
423 # for "historical portability":
423 # for "historical portability":
424 # repo.vfs has been available since 2.3 (or 7034365089bf)
424 # repo.vfs has been available since 2.3 (or 7034365089bf)
425 vfs = getattr(repo, 'vfs', None)
425 vfs = getattr(repo, 'vfs', None)
426 if vfs:
426 if vfs:
427 return vfs
427 return vfs
428 else:
428 else:
429 return getattr(repo, 'opener')
429 return getattr(repo, 'opener')
430
430
431 def repocleartagscachefunc(repo):
431 def repocleartagscachefunc(repo):
432 """Return the function to clear tags cache according to repo internal API
432 """Return the function to clear tags cache according to repo internal API
433 """
433 """
434 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
434 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
435 # in this case, setattr(repo, '_tagscache', None) or so isn't
435 # in this case, setattr(repo, '_tagscache', None) or so isn't
436 # correct way to clear tags cache, because existing code paths
436 # correct way to clear tags cache, because existing code paths
437 # expect _tagscache to be a structured object.
437 # expect _tagscache to be a structured object.
438 def clearcache():
438 def clearcache():
439 # _tagscache has been filteredpropertycache since 2.5 (or
439 # _tagscache has been filteredpropertycache since 2.5 (or
440 # 98c867ac1330), and delattr() can't work in such case
440 # 98c867ac1330), and delattr() can't work in such case
441 if b'_tagscache' in vars(repo):
441 if b'_tagscache' in vars(repo):
442 del repo.__dict__[b'_tagscache']
442 del repo.__dict__[b'_tagscache']
443 return clearcache
443 return clearcache
444
444
445 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
445 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
446 if repotags: # since 1.4 (or 5614a628d173)
446 if repotags: # since 1.4 (or 5614a628d173)
447 return lambda : repotags.set(None)
447 return lambda : repotags.set(None)
448
448
449 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
449 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
450 if repotagscache: # since 0.6 (or d7df759d0e97)
450 if repotagscache: # since 0.6 (or d7df759d0e97)
451 return lambda : repotagscache.set(None)
451 return lambda : repotagscache.set(None)
452
452
453 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
453 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
454 # this point, but it isn't so problematic, because:
454 # this point, but it isn't so problematic, because:
455 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
455 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
456 # in perftags() causes failure soon
456 # in perftags() causes failure soon
457 # - perf.py itself has been available since 1.1 (or eb240755386d)
457 # - perf.py itself has been available since 1.1 (or eb240755386d)
458 raise error.Abort((b"tags API of this hg command is unknown"))
458 raise error.Abort((b"tags API of this hg command is unknown"))
459
459
460 # utilities to clear cache
460 # utilities to clear cache
461
461
462 def clearfilecache(obj, attrname):
462 def clearfilecache(obj, attrname):
463 unfiltered = getattr(obj, 'unfiltered', None)
463 unfiltered = getattr(obj, 'unfiltered', None)
464 if unfiltered is not None:
464 if unfiltered is not None:
465 obj = obj.unfiltered()
465 obj = obj.unfiltered()
466 if attrname in vars(obj):
466 if attrname in vars(obj):
467 delattr(obj, attrname)
467 delattr(obj, attrname)
468 obj._filecache.pop(attrname, None)
468 obj._filecache.pop(attrname, None)
469
469
470 def clearchangelog(repo):
470 def clearchangelog(repo):
471 if repo is not repo.unfiltered():
471 if repo is not repo.unfiltered():
472 object.__setattr__(repo, r'_clcachekey', None)
472 object.__setattr__(repo, r'_clcachekey', None)
473 object.__setattr__(repo, r'_clcache', None)
473 object.__setattr__(repo, r'_clcache', None)
474 clearfilecache(repo.unfiltered(), 'changelog')
474 clearfilecache(repo.unfiltered(), 'changelog')
475
475
476 # perf commands
476 # perf commands
477
477
478 @command(b'perfwalk', formatteropts)
478 @command(b'perfwalk', formatteropts)
479 def perfwalk(ui, repo, *pats, **opts):
479 def perfwalk(ui, repo, *pats, **opts):
480 opts = _byteskwargs(opts)
480 opts = _byteskwargs(opts)
481 timer, fm = gettimer(ui, opts)
481 timer, fm = gettimer(ui, opts)
482 m = scmutil.match(repo[None], pats, {})
482 m = scmutil.match(repo[None], pats, {})
483 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
483 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
484 ignored=False))))
484 ignored=False))))
485 fm.end()
485 fm.end()
486
486
487 @command(b'perfannotate', formatteropts)
487 @command(b'perfannotate', formatteropts)
488 def perfannotate(ui, repo, f, **opts):
488 def perfannotate(ui, repo, f, **opts):
489 opts = _byteskwargs(opts)
489 opts = _byteskwargs(opts)
490 timer, fm = gettimer(ui, opts)
490 timer, fm = gettimer(ui, opts)
491 fc = repo[b'.'][f]
491 fc = repo[b'.'][f]
492 timer(lambda: len(fc.annotate(True)))
492 timer(lambda: len(fc.annotate(True)))
493 fm.end()
493 fm.end()
494
494
495 @command(b'perfstatus',
495 @command(b'perfstatus',
496 [(b'u', b'unknown', False,
496 [(b'u', b'unknown', False,
497 b'ask status to look for unknown files')] + formatteropts)
497 b'ask status to look for unknown files')] + formatteropts)
498 def perfstatus(ui, repo, **opts):
498 def perfstatus(ui, repo, **opts):
499 opts = _byteskwargs(opts)
499 opts = _byteskwargs(opts)
500 #m = match.always(repo.root, repo.getcwd())
500 #m = match.always(repo.root, repo.getcwd())
501 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
501 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
502 # False))))
502 # False))))
503 timer, fm = gettimer(ui, opts)
503 timer, fm = gettimer(ui, opts)
504 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
504 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
505 fm.end()
505 fm.end()
506
506
507 @command(b'perfaddremove', formatteropts)
507 @command(b'perfaddremove', formatteropts)
508 def perfaddremove(ui, repo, **opts):
508 def perfaddremove(ui, repo, **opts):
509 opts = _byteskwargs(opts)
509 opts = _byteskwargs(opts)
510 timer, fm = gettimer(ui, opts)
510 timer, fm = gettimer(ui, opts)
511 try:
511 try:
512 oldquiet = repo.ui.quiet
512 oldquiet = repo.ui.quiet
513 repo.ui.quiet = True
513 repo.ui.quiet = True
514 matcher = scmutil.match(repo[None])
514 matcher = scmutil.match(repo[None])
515 opts[b'dry_run'] = True
515 opts[b'dry_run'] = True
516 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
516 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
517 finally:
517 finally:
518 repo.ui.quiet = oldquiet
518 repo.ui.quiet = oldquiet
519 fm.end()
519 fm.end()
520
520
521 def clearcaches(cl):
521 def clearcaches(cl):
522 # behave somewhat consistently across internal API changes
522 # behave somewhat consistently across internal API changes
523 if util.safehasattr(cl, b'clearcaches'):
523 if util.safehasattr(cl, b'clearcaches'):
524 cl.clearcaches()
524 cl.clearcaches()
525 elif util.safehasattr(cl, b'_nodecache'):
525 elif util.safehasattr(cl, b'_nodecache'):
526 from mercurial.node import nullid, nullrev
526 from mercurial.node import nullid, nullrev
527 cl._nodecache = {nullid: nullrev}
527 cl._nodecache = {nullid: nullrev}
528 cl._nodepos = None
528 cl._nodepos = None
529
529
530 @command(b'perfheads', formatteropts)
530 @command(b'perfheads', formatteropts)
531 def perfheads(ui, repo, **opts):
531 def perfheads(ui, repo, **opts):
532 opts = _byteskwargs(opts)
532 opts = _byteskwargs(opts)
533 timer, fm = gettimer(ui, opts)
533 timer, fm = gettimer(ui, opts)
534 cl = repo.changelog
534 cl = repo.changelog
535 def d():
535 def d():
536 len(cl.headrevs())
536 len(cl.headrevs())
537 clearcaches(cl)
537 clearcaches(cl)
538 timer(d)
538 timer(d)
539 fm.end()
539 fm.end()
540
540
541 @command(b'perftags', formatteropts)
541 @command(b'perftags', formatteropts)
542 def perftags(ui, repo, **opts):
542 def perftags(ui, repo, **opts):
543 import mercurial.changelog
543 import mercurial.changelog
544 import mercurial.manifest
544 import mercurial.manifest
545
545
546 opts = _byteskwargs(opts)
546 opts = _byteskwargs(opts)
547 timer, fm = gettimer(ui, opts)
547 timer, fm = gettimer(ui, opts)
548 svfs = getsvfs(repo)
548 svfs = getsvfs(repo)
549 repocleartagscache = repocleartagscachefunc(repo)
549 repocleartagscache = repocleartagscachefunc(repo)
550 def s():
550 def s():
551 repo.changelog = mercurial.changelog.changelog(svfs)
551 repo.changelog = mercurial.changelog.changelog(svfs)
552 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
552 rootmanifest = mercurial.manifest.manifestrevlog(svfs)
553 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
553 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
554 rootmanifest)
554 rootmanifest)
555 repocleartagscache()
555 repocleartagscache()
556 def t():
556 def t():
557 return len(repo.tags())
557 return len(repo.tags())
558 timer(t, setup=s)
558 timer(t, setup=s)
559 fm.end()
559 fm.end()
560
560
561 @command(b'perfancestors', formatteropts)
561 @command(b'perfancestors', formatteropts)
562 def perfancestors(ui, repo, **opts):
562 def perfancestors(ui, repo, **opts):
563 opts = _byteskwargs(opts)
563 opts = _byteskwargs(opts)
564 timer, fm = gettimer(ui, opts)
564 timer, fm = gettimer(ui, opts)
565 heads = repo.changelog.headrevs()
565 heads = repo.changelog.headrevs()
566 def d():
566 def d():
567 for a in repo.changelog.ancestors(heads):
567 for a in repo.changelog.ancestors(heads):
568 pass
568 pass
569 timer(d)
569 timer(d)
570 fm.end()
570 fm.end()
571
571
572 @command(b'perfancestorset', formatteropts)
572 @command(b'perfancestorset', formatteropts)
573 def perfancestorset(ui, repo, revset, **opts):
573 def perfancestorset(ui, repo, revset, **opts):
574 opts = _byteskwargs(opts)
574 opts = _byteskwargs(opts)
575 timer, fm = gettimer(ui, opts)
575 timer, fm = gettimer(ui, opts)
576 revs = repo.revs(revset)
576 revs = repo.revs(revset)
577 heads = repo.changelog.headrevs()
577 heads = repo.changelog.headrevs()
578 def d():
578 def d():
579 s = repo.changelog.ancestors(heads)
579 s = repo.changelog.ancestors(heads)
580 for rev in revs:
580 for rev in revs:
581 rev in s
581 rev in s
582 timer(d)
582 timer(d)
583 fm.end()
583 fm.end()
584
584
585 @command(b'perfbookmarks', formatteropts)
585 @command(b'perfbookmarks', formatteropts)
586 def perfbookmarks(ui, repo, **opts):
586 def perfbookmarks(ui, repo, **opts):
587 """benchmark parsing bookmarks from disk to memory"""
587 """benchmark parsing bookmarks from disk to memory"""
588 opts = _byteskwargs(opts)
588 opts = _byteskwargs(opts)
589 timer, fm = gettimer(ui, opts)
589 timer, fm = gettimer(ui, opts)
590
590
591 def s():
591 def s():
592 clearfilecache(repo, b'_bookmarks')
592 clearfilecache(repo, b'_bookmarks')
593 def d():
593 def d():
594 repo._bookmarks
594 repo._bookmarks
595 timer(d, setup=s)
595 timer(d, setup=s)
596 fm.end()
596 fm.end()
597
597
598 @command(b'perfbundleread', formatteropts, b'BUNDLE')
598 @command(b'perfbundleread', formatteropts, b'BUNDLE')
599 def perfbundleread(ui, repo, bundlepath, **opts):
599 def perfbundleread(ui, repo, bundlepath, **opts):
600 """Benchmark reading of bundle files.
600 """Benchmark reading of bundle files.
601
601
602 This command is meant to isolate the I/O part of bundle reading as
602 This command is meant to isolate the I/O part of bundle reading as
603 much as possible.
603 much as possible.
604 """
604 """
605 from mercurial import (
605 from mercurial import (
606 bundle2,
606 bundle2,
607 exchange,
607 exchange,
608 streamclone,
608 streamclone,
609 )
609 )
610
610
611 opts = _byteskwargs(opts)
611 opts = _byteskwargs(opts)
612
612
613 def makebench(fn):
613 def makebench(fn):
614 def run():
614 def run():
615 with open(bundlepath, b'rb') as fh:
615 with open(bundlepath, b'rb') as fh:
616 bundle = exchange.readbundle(ui, fh, bundlepath)
616 bundle = exchange.readbundle(ui, fh, bundlepath)
617 fn(bundle)
617 fn(bundle)
618
618
619 return run
619 return run
620
620
621 def makereadnbytes(size):
621 def makereadnbytes(size):
622 def run():
622 def run():
623 with open(bundlepath, b'rb') as fh:
623 with open(bundlepath, b'rb') as fh:
624 bundle = exchange.readbundle(ui, fh, bundlepath)
624 bundle = exchange.readbundle(ui, fh, bundlepath)
625 while bundle.read(size):
625 while bundle.read(size):
626 pass
626 pass
627
627
628 return run
628 return run
629
629
630 def makestdioread(size):
630 def makestdioread(size):
631 def run():
631 def run():
632 with open(bundlepath, b'rb') as fh:
632 with open(bundlepath, b'rb') as fh:
633 while fh.read(size):
633 while fh.read(size):
634 pass
634 pass
635
635
636 return run
636 return run
637
637
638 # bundle1
638 # bundle1
639
639
640 def deltaiter(bundle):
640 def deltaiter(bundle):
641 for delta in bundle.deltaiter():
641 for delta in bundle.deltaiter():
642 pass
642 pass
643
643
644 def iterchunks(bundle):
644 def iterchunks(bundle):
645 for chunk in bundle.getchunks():
645 for chunk in bundle.getchunks():
646 pass
646 pass
647
647
648 # bundle2
648 # bundle2
649
649
650 def forwardchunks(bundle):
650 def forwardchunks(bundle):
651 for chunk in bundle._forwardchunks():
651 for chunk in bundle._forwardchunks():
652 pass
652 pass
653
653
654 def iterparts(bundle):
654 def iterparts(bundle):
655 for part in bundle.iterparts():
655 for part in bundle.iterparts():
656 pass
656 pass
657
657
658 def iterpartsseekable(bundle):
658 def iterpartsseekable(bundle):
659 for part in bundle.iterparts(seekable=True):
659 for part in bundle.iterparts(seekable=True):
660 pass
660 pass
661
661
662 def seek(bundle):
662 def seek(bundle):
663 for part in bundle.iterparts(seekable=True):
663 for part in bundle.iterparts(seekable=True):
664 part.seek(0, os.SEEK_END)
664 part.seek(0, os.SEEK_END)
665
665
666 def makepartreadnbytes(size):
666 def makepartreadnbytes(size):
667 def run():
667 def run():
668 with open(bundlepath, b'rb') as fh:
668 with open(bundlepath, b'rb') as fh:
669 bundle = exchange.readbundle(ui, fh, bundlepath)
669 bundle = exchange.readbundle(ui, fh, bundlepath)
670 for part in bundle.iterparts():
670 for part in bundle.iterparts():
671 while part.read(size):
671 while part.read(size):
672 pass
672 pass
673
673
674 return run
674 return run
675
675
676 benches = [
676 benches = [
677 (makestdioread(8192), b'read(8k)'),
677 (makestdioread(8192), b'read(8k)'),
678 (makestdioread(16384), b'read(16k)'),
678 (makestdioread(16384), b'read(16k)'),
679 (makestdioread(32768), b'read(32k)'),
679 (makestdioread(32768), b'read(32k)'),
680 (makestdioread(131072), b'read(128k)'),
680 (makestdioread(131072), b'read(128k)'),
681 ]
681 ]
682
682
683 with open(bundlepath, b'rb') as fh:
683 with open(bundlepath, b'rb') as fh:
684 bundle = exchange.readbundle(ui, fh, bundlepath)
684 bundle = exchange.readbundle(ui, fh, bundlepath)
685
685
686 if isinstance(bundle, changegroup.cg1unpacker):
686 if isinstance(bundle, changegroup.cg1unpacker):
687 benches.extend([
687 benches.extend([
688 (makebench(deltaiter), b'cg1 deltaiter()'),
688 (makebench(deltaiter), b'cg1 deltaiter()'),
689 (makebench(iterchunks), b'cg1 getchunks()'),
689 (makebench(iterchunks), b'cg1 getchunks()'),
690 (makereadnbytes(8192), b'cg1 read(8k)'),
690 (makereadnbytes(8192), b'cg1 read(8k)'),
691 (makereadnbytes(16384), b'cg1 read(16k)'),
691 (makereadnbytes(16384), b'cg1 read(16k)'),
692 (makereadnbytes(32768), b'cg1 read(32k)'),
692 (makereadnbytes(32768), b'cg1 read(32k)'),
693 (makereadnbytes(131072), b'cg1 read(128k)'),
693 (makereadnbytes(131072), b'cg1 read(128k)'),
694 ])
694 ])
695 elif isinstance(bundle, bundle2.unbundle20):
695 elif isinstance(bundle, bundle2.unbundle20):
696 benches.extend([
696 benches.extend([
697 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
697 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
698 (makebench(iterparts), b'bundle2 iterparts()'),
698 (makebench(iterparts), b'bundle2 iterparts()'),
699 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
699 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
700 (makebench(seek), b'bundle2 part seek()'),
700 (makebench(seek), b'bundle2 part seek()'),
701 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
701 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
702 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
702 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
703 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
703 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
704 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
704 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
705 ])
705 ])
706 elif isinstance(bundle, streamclone.streamcloneapplier):
706 elif isinstance(bundle, streamclone.streamcloneapplier):
707 raise error.Abort(b'stream clone bundles not supported')
707 raise error.Abort(b'stream clone bundles not supported')
708 else:
708 else:
709 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
709 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
710
710
711 for fn, title in benches:
711 for fn, title in benches:
712 timer, fm = gettimer(ui, opts)
712 timer, fm = gettimer(ui, opts)
713 timer(fn, title=title)
713 timer(fn, title=title)
714 fm.end()
714 fm.end()
715
715
716 @command(b'perfchangegroupchangelog', formatteropts +
716 @command(b'perfchangegroupchangelog', formatteropts +
717 [(b'', b'cgversion', b'02', b'changegroup version'),
717 [(b'', b'cgversion', b'02', b'changegroup version'),
718 (b'r', b'rev', b'', b'revisions to add to changegroup')])
718 (b'r', b'rev', b'', b'revisions to add to changegroup')])
719 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
719 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
720 """Benchmark producing a changelog group for a changegroup.
720 """Benchmark producing a changelog group for a changegroup.
721
721
722 This measures the time spent processing the changelog during a
722 This measures the time spent processing the changelog during a
723 bundle operation. This occurs during `hg bundle` and on a server
723 bundle operation. This occurs during `hg bundle` and on a server
724 processing a `getbundle` wire protocol request (handles clones
724 processing a `getbundle` wire protocol request (handles clones
725 and pull requests).
725 and pull requests).
726
726
727 By default, all revisions are added to the changegroup.
727 By default, all revisions are added to the changegroup.
728 """
728 """
729 opts = _byteskwargs(opts)
729 opts = _byteskwargs(opts)
730 cl = repo.changelog
730 cl = repo.changelog
731 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
731 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
732 bundler = changegroup.getbundler(cgversion, repo)
732 bundler = changegroup.getbundler(cgversion, repo)
733
733
734 def d():
734 def d():
735 state, chunks = bundler._generatechangelog(cl, nodes)
735 state, chunks = bundler._generatechangelog(cl, nodes)
736 for chunk in chunks:
736 for chunk in chunks:
737 pass
737 pass
738
738
739 timer, fm = gettimer(ui, opts)
739 timer, fm = gettimer(ui, opts)
740
740
741 # Terminal printing can interfere with timing. So disable it.
741 # Terminal printing can interfere with timing. So disable it.
742 with ui.configoverride({(b'progress', b'disable'): True}):
742 with ui.configoverride({(b'progress', b'disable'): True}):
743 timer(d)
743 timer(d)
744
744
745 fm.end()
745 fm.end()
746
746
747 @command(b'perfdirs', formatteropts)
747 @command(b'perfdirs', formatteropts)
748 def perfdirs(ui, repo, **opts):
748 def perfdirs(ui, repo, **opts):
749 opts = _byteskwargs(opts)
749 opts = _byteskwargs(opts)
750 timer, fm = gettimer(ui, opts)
750 timer, fm = gettimer(ui, opts)
751 dirstate = repo.dirstate
751 dirstate = repo.dirstate
752 b'a' in dirstate
752 b'a' in dirstate
753 def d():
753 def d():
754 dirstate.hasdir(b'a')
754 dirstate.hasdir(b'a')
755 del dirstate._map._dirs
755 del dirstate._map._dirs
756 timer(d)
756 timer(d)
757 fm.end()
757 fm.end()
758
758
759 @command(b'perfdirstate', formatteropts)
759 @command(b'perfdirstate', formatteropts)
760 def perfdirstate(ui, repo, **opts):
760 def perfdirstate(ui, repo, **opts):
761 opts = _byteskwargs(opts)
761 opts = _byteskwargs(opts)
762 timer, fm = gettimer(ui, opts)
762 timer, fm = gettimer(ui, opts)
763 b"a" in repo.dirstate
763 b"a" in repo.dirstate
764 def d():
764 def d():
765 repo.dirstate.invalidate()
765 repo.dirstate.invalidate()
766 b"a" in repo.dirstate
766 b"a" in repo.dirstate
767 timer(d)
767 timer(d)
768 fm.end()
768 fm.end()
769
769
770 @command(b'perfdirstatedirs', formatteropts)
770 @command(b'perfdirstatedirs', formatteropts)
771 def perfdirstatedirs(ui, repo, **opts):
771 def perfdirstatedirs(ui, repo, **opts):
772 opts = _byteskwargs(opts)
772 opts = _byteskwargs(opts)
773 timer, fm = gettimer(ui, opts)
773 timer, fm = gettimer(ui, opts)
774 b"a" in repo.dirstate
774 b"a" in repo.dirstate
775 def d():
775 def d():
776 repo.dirstate.hasdir(b"a")
776 repo.dirstate.hasdir(b"a")
777 del repo.dirstate._map._dirs
777 del repo.dirstate._map._dirs
778 timer(d)
778 timer(d)
779 fm.end()
779 fm.end()
780
780
781 @command(b'perfdirstatefoldmap', formatteropts)
781 @command(b'perfdirstatefoldmap', formatteropts)
782 def perfdirstatefoldmap(ui, repo, **opts):
782 def perfdirstatefoldmap(ui, repo, **opts):
783 opts = _byteskwargs(opts)
783 opts = _byteskwargs(opts)
784 timer, fm = gettimer(ui, opts)
784 timer, fm = gettimer(ui, opts)
785 dirstate = repo.dirstate
785 dirstate = repo.dirstate
786 b'a' in dirstate
786 b'a' in dirstate
787 def d():
787 def d():
788 dirstate._map.filefoldmap.get(b'a')
788 dirstate._map.filefoldmap.get(b'a')
789 del dirstate._map.filefoldmap
789 del dirstate._map.filefoldmap
790 timer(d)
790 timer(d)
791 fm.end()
791 fm.end()
792
792
793 @command(b'perfdirfoldmap', formatteropts)
793 @command(b'perfdirfoldmap', formatteropts)
794 def perfdirfoldmap(ui, repo, **opts):
794 def perfdirfoldmap(ui, repo, **opts):
795 opts = _byteskwargs(opts)
795 opts = _byteskwargs(opts)
796 timer, fm = gettimer(ui, opts)
796 timer, fm = gettimer(ui, opts)
797 dirstate = repo.dirstate
797 dirstate = repo.dirstate
798 b'a' in dirstate
798 b'a' in dirstate
799 def d():
799 def d():
800 dirstate._map.dirfoldmap.get(b'a')
800 dirstate._map.dirfoldmap.get(b'a')
801 del dirstate._map.dirfoldmap
801 del dirstate._map.dirfoldmap
802 del dirstate._map._dirs
802 del dirstate._map._dirs
803 timer(d)
803 timer(d)
804 fm.end()
804 fm.end()
805
805
806 @command(b'perfdirstatewrite', formatteropts)
806 @command(b'perfdirstatewrite', formatteropts)
807 def perfdirstatewrite(ui, repo, **opts):
807 def perfdirstatewrite(ui, repo, **opts):
808 opts = _byteskwargs(opts)
808 opts = _byteskwargs(opts)
809 timer, fm = gettimer(ui, opts)
809 timer, fm = gettimer(ui, opts)
810 ds = repo.dirstate
810 ds = repo.dirstate
811 b"a" in ds
811 b"a" in ds
812 def d():
812 def d():
813 ds._dirty = True
813 ds._dirty = True
814 ds.write(repo.currenttransaction())
814 ds.write(repo.currenttransaction())
815 timer(d)
815 timer(d)
816 fm.end()
816 fm.end()
817
817
818 @command(b'perfmergecalculate',
818 @command(b'perfmergecalculate',
819 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
819 [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
820 def perfmergecalculate(ui, repo, rev, **opts):
820 def perfmergecalculate(ui, repo, rev, **opts):
821 opts = _byteskwargs(opts)
821 opts = _byteskwargs(opts)
822 timer, fm = gettimer(ui, opts)
822 timer, fm = gettimer(ui, opts)
823 wctx = repo[None]
823 wctx = repo[None]
824 rctx = scmutil.revsingle(repo, rev, rev)
824 rctx = scmutil.revsingle(repo, rev, rev)
825 ancestor = wctx.ancestor(rctx)
825 ancestor = wctx.ancestor(rctx)
826 # we don't want working dir files to be stat'd in the benchmark, so prime
826 # we don't want working dir files to be stat'd in the benchmark, so prime
827 # that cache
827 # that cache
828 wctx.dirty()
828 wctx.dirty()
829 def d():
829 def d():
830 # acceptremote is True because we don't want prompts in the middle of
830 # acceptremote is True because we don't want prompts in the middle of
831 # our benchmark
831 # our benchmark
832 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
832 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
833 acceptremote=True, followcopies=True)
833 acceptremote=True, followcopies=True)
834 timer(d)
834 timer(d)
835 fm.end()
835 fm.end()
836
836
837 @command(b'perfpathcopies', [], b"REV REV")
837 @command(b'perfpathcopies', [], b"REV REV")
838 def perfpathcopies(ui, repo, rev1, rev2, **opts):
838 def perfpathcopies(ui, repo, rev1, rev2, **opts):
839 """benchmark the copy tracing logic"""
839 opts = _byteskwargs(opts)
840 opts = _byteskwargs(opts)
840 timer, fm = gettimer(ui, opts)
841 timer, fm = gettimer(ui, opts)
841 ctx1 = scmutil.revsingle(repo, rev1, rev1)
842 ctx1 = scmutil.revsingle(repo, rev1, rev1)
842 ctx2 = scmutil.revsingle(repo, rev2, rev2)
843 ctx2 = scmutil.revsingle(repo, rev2, rev2)
843 def d():
844 def d():
844 copies.pathcopies(ctx1, ctx2)
845 copies.pathcopies(ctx1, ctx2)
845 timer(d)
846 timer(d)
846 fm.end()
847 fm.end()
847
848
848 @command(b'perfphases',
849 @command(b'perfphases',
849 [(b'', b'full', False, b'include file reading time too'),
850 [(b'', b'full', False, b'include file reading time too'),
850 ], b"")
851 ], b"")
851 def perfphases(ui, repo, **opts):
852 def perfphases(ui, repo, **opts):
852 """benchmark phasesets computation"""
853 """benchmark phasesets computation"""
853 opts = _byteskwargs(opts)
854 opts = _byteskwargs(opts)
854 timer, fm = gettimer(ui, opts)
855 timer, fm = gettimer(ui, opts)
855 _phases = repo._phasecache
856 _phases = repo._phasecache
856 full = opts.get(b'full')
857 full = opts.get(b'full')
857 def d():
858 def d():
858 phases = _phases
859 phases = _phases
859 if full:
860 if full:
860 clearfilecache(repo, b'_phasecache')
861 clearfilecache(repo, b'_phasecache')
861 phases = repo._phasecache
862 phases = repo._phasecache
862 phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()

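# Example use of perfphasesremote (illustrative destination; any configured
# path works, and with no argument the default-push/default path is used):
#   hg perfphasesremote
#   hg perfphasesremote ssh://hg.example.org/repo
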
@command(b'perfmanifest',[
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

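# Example use of perfmanifest (illustrative revisions):
#   hg perfmanifest tip                 # resolve via a changeset revision
#   hg perfmanifest -m 0                # treat the argument as a manifest rev
#   hg perfmanifest tip --clear-disk    # also drop on-disk manifest caches
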
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command(b'perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo[b"tip"].node()
    svfs = getsvfs(repo)
    def d():
        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
        cl.rev(n)
    timer(d)
    fm.end()

@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

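# Example: exercise more revisions than the default 1000 (illustrative value;
# the repository must have at least that many commits):
#   hg perfparents --config perf.parentscount=5000
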
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

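# Example (illustrative values): benchmark 50000 random linelog edits with
# hunks of at most 5 lines:
#   hg perflinelogedits -n 50000 --max-hunk-lines 5
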
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()

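# Example use of perftemplating (illustrative revset and template; the
# optional positional argument overrides the default template):
#   hg perftemplating -r '0:9999' '{rev} {desc|firstline}\n'
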
@command(b'perfhelper-tracecopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          ])
def perfhelpertracecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for the `perftracecopies`
    command

    This command finds source-destination pairs relevant for copy-tracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()

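# Example use of perfhelper-tracecopies (illustrative revset): restrict the
# search to a revision range and include per-pair copy tracing timings:
#   hg perfhelper-tracecopies -r '1000::2000' --timing
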
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()

def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],

    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

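# Example use of perfbdiff (illustrative values): benchmark manifest bdiffs
# for 50 revisions starting at revision 10000, using 4 worker threads:
#   hg perfbdiff -m 10000 --count 50 --threads 4
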
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

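# Example use of perfunidiff (illustrative revision): benchmark unified diffs
# for everything touched by the changeset at revision 5000:
#   hg perfunidiff 5000 --alldata
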
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()

@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

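# Example: run the index micro-benchmarks against the manifest revlog instead
# of the changelog:
#   hg perfrevlogindex -m
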
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

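# Example use of perfrevlogrevisions (illustrative values): read every 10th
# changelog revision, newest first:
#   hg perfrevlogrevisions -c --dist 10 --reverse
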
@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of runs to perform'),
          (b'', b'details', False, b'print timing for every revision tested'),
          (b'', b'source', b'full', b'the kind of data fed into the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many floats will not be very precise, we ignore this
    # fact for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
                         )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()

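# Example use of perfrevlogwrite (illustrative): time re-adding manifest
# revisions (from the default start revision onwards) using the smallest
# parent delta as the source, printing per-revision timings:
#   hg perfrevlogwrite -m --source parent-smallest --details
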
class _faketr(object):
    def add(s, x, y, z=None):
        return None

def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit='revs', total=total)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        ui.progress(topic, total, unit='revs', total=total)
        ui.progress(topic, None, unit='revs', total=total)
    return timings

def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})

1827 @contextlib.contextmanager
1828 @contextlib.contextmanager
1828 def _temprevlog(ui, orig, truncaterev):
1829 def _temprevlog(ui, orig, truncaterev):
1829 from mercurial import vfs as vfsmod
1830 from mercurial import vfs as vfsmod
1830
1831
1831 if orig._inline:
1832 if orig._inline:
1832 raise error.Abort('not supporting inline revlog (yet)')
1833 raise error.Abort('not supporting inline revlog (yet)')
1833
1834
1834 origindexpath = orig.opener.join(orig.indexfile)
1835 origindexpath = orig.opener.join(orig.indexfile)
1835 origdatapath = orig.opener.join(orig.datafile)
1836 origdatapath = orig.opener.join(orig.datafile)
1836 indexname = 'revlog.i'
1837 indexname = 'revlog.i'
1837 dataname = 'revlog.d'
1838 dataname = 'revlog.d'
1838
1839
1839 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1840 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1840 try:
1841 try:
1841 # copy the data file in a temporary directory
1842 # copy the data file in a temporary directory
1842 ui.debug('copying data in %s\n' % tmpdir)
1843 ui.debug('copying data in %s\n' % tmpdir)
1843 destindexpath = os.path.join(tmpdir, 'revlog.i')
1844 destindexpath = os.path.join(tmpdir, 'revlog.i')
1844 destdatapath = os.path.join(tmpdir, 'revlog.d')
1845 destdatapath = os.path.join(tmpdir, 'revlog.d')
1845 shutil.copyfile(origindexpath, destindexpath)
1846 shutil.copyfile(origindexpath, destindexpath)
1846 shutil.copyfile(origdatapath, destdatapath)
1847 shutil.copyfile(origdatapath, destdatapath)
1847
1848
1848 # remove the data we want to add again
1849 # remove the data we want to add again
1849 ui.debug('truncating data to be rewritten\n')
1850 ui.debug('truncating data to be rewritten\n')
1850 with open(destindexpath, 'ab') as index:
1851 with open(destindexpath, 'ab') as index:
1851 index.seek(0)
1852 index.seek(0)
1852 index.truncate(truncaterev * orig._io.size)
1853 index.truncate(truncaterev * orig._io.size)
1853 with open(destdatapath, 'ab') as data:
1854 with open(destdatapath, 'ab') as data:
1854 data.seek(0)
1855 data.seek(0)
1855 data.truncate(orig.start(truncaterev))
1856 data.truncate(orig.start(truncaterev))
1856
1857
1857 # instantiate a new revlog from the temporary copy
1858 # instantiate a new revlog from the temporary copy
1858 ui.debug('instantiating revlog from the truncated copy\n')
1859 ui.debug('instantiating revlog from the truncated copy\n')
1859 vfs = vfsmod.vfs(tmpdir)
1860 vfs = vfsmod.vfs(tmpdir)
1860 vfs.options = getattr(orig.opener, 'options', None)
1861 vfs.options = getattr(orig.opener, 'options', None)
1861
1862
1862 dest = revlog.revlog(vfs,
1863 dest = revlog.revlog(vfs,
1863 indexfile=indexname,
1864 indexfile=indexname,
1864 datafile=dataname)
1865 datafile=dataname)
1865 if dest._inline:
1866 if dest._inline:
1866 raise error.Abort('not supporting inline revlog (yet)')
1867 raise error.Abort('not supporting inline revlog (yet)')
1867 # make sure internals are initialized
1868 # make sure internals are initialized
1868 dest.revision(len(dest) - 1)
1869 dest.revision(len(dest) - 1)
1869 yield dest
1870 yield dest
1870 del dest, vfs
1871 del dest, vfs
1871 finally:
1872 finally:
1872 shutil.rmtree(tmpdir, True)
1873 shutil.rmtree(tmpdir, True)
1873
1874
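
For readers skimming the diff, the copy/truncate/clean-up pattern used by _temprevlog above can be seen in isolation in the following minimal sketch. It is illustrative only: the truncated_copy helper is hypothetical, works on plain files with the standard library rather than Mercurial's vfs layer, and assumes Python 3.

import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def truncated_copy(path, keep_bytes):
    # Copy the file into a private temporary directory, drop the tail we
    # intend to rewrite, hand the copy to the caller, then clean up.
    tmpdir = tempfile.mkdtemp(prefix='tmp-example-')
    try:
        dest = os.path.join(tmpdir, os.path.basename(path))
        shutil.copyfile(path, dest)
        with open(dest, 'ab') as fh:
            fh.truncate(keep_bytes)
        yield dest
    finally:
        shutil.rmtree(tmpdir, True)

if __name__ == '__main__':
    with tempfile.NamedTemporaryFile(delete=False) as src:
        src.write(b'0123456789')
    with truncated_copy(src.name, 4) as copy_path:
        with open(copy_path, 'rb') as fh:
            print(fh.read())  # b'0123'
    os.unlink(src.name)
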
1874 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1875 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1875 [(b'e', b'engines', b'', b'compression engines to use'),
1876 [(b'e', b'engines', b'', b'compression engines to use'),
1876 (b's', b'startrev', 0, b'revision to start at')],
1877 (b's', b'startrev', 0, b'revision to start at')],
1877 b'-c|-m|FILE')
1878 b'-c|-m|FILE')
1878 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1879 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1879 """Benchmark operations on revlog chunks.
1880 """Benchmark operations on revlog chunks.
1880
1881
1881 Logically, each revlog is a collection of fulltext revisions. However,
1882 Logically, each revlog is a collection of fulltext revisions. However,
1882 stored within each revlog are "chunks" of possibly compressed data. This
1883 stored within each revlog are "chunks" of possibly compressed data. This
1883 data needs to be read and decompressed or compressed and written.
1884 data needs to be read and decompressed or compressed and written.
1884
1885
1885 This command measures the time it takes to read+decompress and recompress
1886 This command measures the time it takes to read+decompress and recompress
1886 chunks in a revlog. It effectively isolates I/O and compression performance.
1887 chunks in a revlog. It effectively isolates I/O and compression performance.
1887 For measurements of higher-level operations like resolving revisions,
1888 For measurements of higher-level operations like resolving revisions,
1888 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1889 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1889 """
1890 """
1890 opts = _byteskwargs(opts)
1891 opts = _byteskwargs(opts)
1891
1892
1892 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1893 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1893
1894
1894 # _chunkraw was renamed to _getsegmentforrevs.
1895 # _chunkraw was renamed to _getsegmentforrevs.
1895 try:
1896 try:
1896 segmentforrevs = rl._getsegmentforrevs
1897 segmentforrevs = rl._getsegmentforrevs
1897 except AttributeError:
1898 except AttributeError:
1898 segmentforrevs = rl._chunkraw
1899 segmentforrevs = rl._chunkraw
1899
1900
1900 # Verify engines argument.
1901 # Verify engines argument.
1901 if engines:
1902 if engines:
1902 engines = set(e.strip() for e in engines.split(b','))
1903 engines = set(e.strip() for e in engines.split(b','))
1903 for engine in engines:
1904 for engine in engines:
1904 try:
1905 try:
1905 util.compressionengines[engine]
1906 util.compressionengines[engine]
1906 except KeyError:
1907 except KeyError:
1907 raise error.Abort(b'unknown compression engine: %s' % engine)
1908 raise error.Abort(b'unknown compression engine: %s' % engine)
1908 else:
1909 else:
1909 engines = []
1910 engines = []
1910 for e in util.compengines:
1911 for e in util.compengines:
1911 engine = util.compengines[e]
1912 engine = util.compengines[e]
1912 try:
1913 try:
1913 if engine.available():
1914 if engine.available():
1914 engine.revlogcompressor().compress(b'dummy')
1915 engine.revlogcompressor().compress(b'dummy')
1915 engines.append(e)
1916 engines.append(e)
1916 except NotImplementedError:
1917 except NotImplementedError:
1917 pass
1918 pass
1918
1919
1919 revs = list(rl.revs(startrev, len(rl) - 1))
1920 revs = list(rl.revs(startrev, len(rl) - 1))
1920
1921
1921 def rlfh(rl):
1922 def rlfh(rl):
1922 if rl._inline:
1923 if rl._inline:
1923 return getsvfs(repo)(rl.indexfile)
1924 return getsvfs(repo)(rl.indexfile)
1924 else:
1925 else:
1925 return getsvfs(repo)(rl.datafile)
1926 return getsvfs(repo)(rl.datafile)
1926
1927
1927 def doread():
1928 def doread():
1928 rl.clearcaches()
1929 rl.clearcaches()
1929 for rev in revs:
1930 for rev in revs:
1930 segmentforrevs(rev, rev)
1931 segmentforrevs(rev, rev)
1931
1932
1932 def doreadcachedfh():
1933 def doreadcachedfh():
1933 rl.clearcaches()
1934 rl.clearcaches()
1934 fh = rlfh(rl)
1935 fh = rlfh(rl)
1935 for rev in revs:
1936 for rev in revs:
1936 segmentforrevs(rev, rev, df=fh)
1937 segmentforrevs(rev, rev, df=fh)
1937
1938
1938 def doreadbatch():
1939 def doreadbatch():
1939 rl.clearcaches()
1940 rl.clearcaches()
1940 segmentforrevs(revs[0], revs[-1])
1941 segmentforrevs(revs[0], revs[-1])
1941
1942
1942 def doreadbatchcachedfh():
1943 def doreadbatchcachedfh():
1943 rl.clearcaches()
1944 rl.clearcaches()
1944 fh = rlfh(rl)
1945 fh = rlfh(rl)
1945 segmentforrevs(revs[0], revs[-1], df=fh)
1946 segmentforrevs(revs[0], revs[-1], df=fh)
1946
1947
1947 def dochunk():
1948 def dochunk():
1948 rl.clearcaches()
1949 rl.clearcaches()
1949 fh = rlfh(rl)
1950 fh = rlfh(rl)
1950 for rev in revs:
1951 for rev in revs:
1951 rl._chunk(rev, df=fh)
1952 rl._chunk(rev, df=fh)
1952
1953
1953 chunks = [None]
1954 chunks = [None]
1954
1955
1955 def dochunkbatch():
1956 def dochunkbatch():
1956 rl.clearcaches()
1957 rl.clearcaches()
1957 fh = rlfh(rl)
1958 fh = rlfh(rl)
1958 # Save chunks as a side-effect.
1959 # Save chunks as a side-effect.
1959 chunks[0] = rl._chunks(revs, df=fh)
1960 chunks[0] = rl._chunks(revs, df=fh)
1960
1961
1961 def docompress(compressor):
1962 def docompress(compressor):
1962 rl.clearcaches()
1963 rl.clearcaches()
1963
1964
1964 try:
1965 try:
1965 # Swap in the requested compression engine.
1966 # Swap in the requested compression engine.
1966 oldcompressor = rl._compressor
1967 oldcompressor = rl._compressor
1967 rl._compressor = compressor
1968 rl._compressor = compressor
1968 for chunk in chunks[0]:
1969 for chunk in chunks[0]:
1969 rl.compress(chunk)
1970 rl.compress(chunk)
1970 finally:
1971 finally:
1971 rl._compressor = oldcompressor
1972 rl._compressor = oldcompressor
1972
1973
1973 benches = [
1974 benches = [
1974 (lambda: doread(), b'read'),
1975 (lambda: doread(), b'read'),
1975 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1976 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1976 (lambda: doreadbatch(), b'read batch'),
1977 (lambda: doreadbatch(), b'read batch'),
1977 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1978 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1978 (lambda: dochunk(), b'chunk'),
1979 (lambda: dochunk(), b'chunk'),
1979 (lambda: dochunkbatch(), b'chunk batch'),
1980 (lambda: dochunkbatch(), b'chunk batch'),
1980 ]
1981 ]
1981
1982
1982 for engine in sorted(engines):
1983 for engine in sorted(engines):
1983 compressor = util.compengines[engine].revlogcompressor()
1984 compressor = util.compengines[engine].revlogcompressor()
1984 benches.append((functools.partial(docompress, compressor),
1985 benches.append((functools.partial(docompress, compressor),
1985 b'compress w/ %s' % engine))
1986 b'compress w/ %s' % engine))
1986
1987
1987 for fn, title in benches:
1988 for fn, title in benches:
1988 timer, fm = gettimer(ui, opts)
1989 timer, fm = gettimer(ui, opts)
1989 timer(fn, title=title)
1990 timer(fn, title=title)
1990 fm.end()
1991 fm.end()
1991
1992
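
The perfrevlogchunks docstring above describes isolating read+decompress and recompress costs. As a rough, self-contained illustration of that idea, here is a sketch that uses zlib in place of Mercurial's pluggable compression engines; the time_chunks helper is hypothetical and only the standard library is assumed.

import time
import zlib

def time_chunks(chunks):
    # Return (decompress_seconds, compress_seconds) for a list of
    # zlib-compressed byte strings.
    start = time.perf_counter()
    texts = [zlib.decompress(c) for c in chunks]
    decompress_secs = time.perf_counter() - start

    start = time.perf_counter()
    for text in texts:
        zlib.compress(text)
    compress_secs = time.perf_counter() - start
    return decompress_secs, compress_secs

if __name__ == '__main__':
    sample = [zlib.compress((b'revision %d\n' % i) * 100) for i in range(1000)]
    print(time_chunks(sample))
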
1992 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1993 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1993 [(b'', b'cache', False, b'use caches instead of clearing')],
1994 [(b'', b'cache', False, b'use caches instead of clearing')],
1994 b'-c|-m|FILE REV')
1995 b'-c|-m|FILE REV')
1995 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1996 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1996 """Benchmark obtaining a revlog revision.
1997 """Benchmark obtaining a revlog revision.
1997
1998
1998 Obtaining a revlog revision consists of roughly the following steps:
1999 Obtaining a revlog revision consists of roughly the following steps:
1999
2000
2000 1. Compute the delta chain
2001 1. Compute the delta chain
2001 2. Slice the delta chain if applicable
2002 2. Slice the delta chain if applicable
2002 3. Obtain the raw chunks for that delta chain
2003 3. Obtain the raw chunks for that delta chain
2003 4. Decompress each raw chunk
2004 4. Decompress each raw chunk
2004 5. Apply binary patches to obtain fulltext
2005 5. Apply binary patches to obtain fulltext
2005 6. Verify hash of fulltext
2006 6. Verify hash of fulltext
2006
2007
2007 This command measures the time spent in each of these phases.
2008 This command measures the time spent in each of these phases.
2008 """
2009 """
2009 opts = _byteskwargs(opts)
2010 opts = _byteskwargs(opts)
2010
2011
2011 if opts.get(b'changelog') or opts.get(b'manifest'):
2012 if opts.get(b'changelog') or opts.get(b'manifest'):
2012 file_, rev = None, file_
2013 file_, rev = None, file_
2013 elif rev is None:
2014 elif rev is None:
2014 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2015 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2015
2016
2016 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2017 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2017
2018
2018 # _chunkraw was renamed to _getsegmentforrevs.
2019 # _chunkraw was renamed to _getsegmentforrevs.
2019 try:
2020 try:
2020 segmentforrevs = r._getsegmentforrevs
2021 segmentforrevs = r._getsegmentforrevs
2021 except AttributeError:
2022 except AttributeError:
2022 segmentforrevs = r._chunkraw
2023 segmentforrevs = r._chunkraw
2023
2024
2024 node = r.lookup(rev)
2025 node = r.lookup(rev)
2025 rev = r.rev(node)
2026 rev = r.rev(node)
2026
2027
2027 def getrawchunks(data, chain):
2028 def getrawchunks(data, chain):
2028 start = r.start
2029 start = r.start
2029 length = r.length
2030 length = r.length
2030 inline = r._inline
2031 inline = r._inline
2031 iosize = r._io.size
2032 iosize = r._io.size
2032 buffer = util.buffer
2033 buffer = util.buffer
2033
2034
2034 chunks = []
2035 chunks = []
2035 ladd = chunks.append
2036 ladd = chunks.append
2036 for idx, item in enumerate(chain):
2037 for idx, item in enumerate(chain):
2037 offset = start(item[0])
2038 offset = start(item[0])
2038 bits = data[idx]
2039 bits = data[idx]
2039 for rev in item:
2040 for rev in item:
2040 chunkstart = start(rev)
2041 chunkstart = start(rev)
2041 if inline:
2042 if inline:
2042 chunkstart += (rev + 1) * iosize
2043 chunkstart += (rev + 1) * iosize
2043 chunklength = length(rev)
2044 chunklength = length(rev)
2044 ladd(buffer(bits, chunkstart - offset, chunklength))
2045 ladd(buffer(bits, chunkstart - offset, chunklength))
2045
2046
2046 return chunks
2047 return chunks
2047
2048
2048 def dodeltachain(rev):
2049 def dodeltachain(rev):
2049 if not cache:
2050 if not cache:
2050 r.clearcaches()
2051 r.clearcaches()
2051 r._deltachain(rev)
2052 r._deltachain(rev)
2052
2053
2053 def doread(chain):
2054 def doread(chain):
2054 if not cache:
2055 if not cache:
2055 r.clearcaches()
2056 r.clearcaches()
2056 for item in slicedchain:
2057 for item in slicedchain:
2057 segmentforrevs(item[0], item[-1])
2058 segmentforrevs(item[0], item[-1])
2058
2059
2059 def doslice(r, chain, size):
2060 def doslice(r, chain, size):
2060 for s in slicechunk(r, chain, targetsize=size):
2061 for s in slicechunk(r, chain, targetsize=size):
2061 pass
2062 pass
2062
2063
2063 def dorawchunks(data, chain):
2064 def dorawchunks(data, chain):
2064 if not cache:
2065 if not cache:
2065 r.clearcaches()
2066 r.clearcaches()
2066 getrawchunks(data, chain)
2067 getrawchunks(data, chain)
2067
2068
2068 def dodecompress(chunks):
2069 def dodecompress(chunks):
2069 decomp = r.decompress
2070 decomp = r.decompress
2070 for chunk in chunks:
2071 for chunk in chunks:
2071 decomp(chunk)
2072 decomp(chunk)
2072
2073
2073 def dopatch(text, bins):
2074 def dopatch(text, bins):
2074 if not cache:
2075 if not cache:
2075 r.clearcaches()
2076 r.clearcaches()
2076 mdiff.patches(text, bins)
2077 mdiff.patches(text, bins)
2077
2078
2078 def dohash(text):
2079 def dohash(text):
2079 if not cache:
2080 if not cache:
2080 r.clearcaches()
2081 r.clearcaches()
2081 r.checkhash(text, node, rev=rev)
2082 r.checkhash(text, node, rev=rev)
2082
2083
2083 def dorevision():
2084 def dorevision():
2084 if not cache:
2085 if not cache:
2085 r.clearcaches()
2086 r.clearcaches()
2086 r.revision(node)
2087 r.revision(node)
2087
2088
2088 try:
2089 try:
2089 from mercurial.revlogutils.deltas import slicechunk
2090 from mercurial.revlogutils.deltas import slicechunk
2090 except ImportError:
2091 except ImportError:
2091 slicechunk = getattr(revlog, '_slicechunk', None)
2092 slicechunk = getattr(revlog, '_slicechunk', None)
2092
2093
2093 size = r.length(rev)
2094 size = r.length(rev)
2094 chain = r._deltachain(rev)[0]
2095 chain = r._deltachain(rev)[0]
2095 if not getattr(r, '_withsparseread', False):
2096 if not getattr(r, '_withsparseread', False):
2096 slicedchain = (chain,)
2097 slicedchain = (chain,)
2097 else:
2098 else:
2098 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2099 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2099 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2100 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2100 rawchunks = getrawchunks(data, slicedchain)
2101 rawchunks = getrawchunks(data, slicedchain)
2101 bins = r._chunks(chain)
2102 bins = r._chunks(chain)
2102 text = bytes(bins[0])
2103 text = bytes(bins[0])
2103 bins = bins[1:]
2104 bins = bins[1:]
2104 text = mdiff.patches(text, bins)
2105 text = mdiff.patches(text, bins)
2105
2106
2106 benches = [
2107 benches = [
2107 (lambda: dorevision(), b'full'),
2108 (lambda: dorevision(), b'full'),
2108 (lambda: dodeltachain(rev), b'deltachain'),
2109 (lambda: dodeltachain(rev), b'deltachain'),
2109 (lambda: doread(chain), b'read'),
2110 (lambda: doread(chain), b'read'),
2110 ]
2111 ]
2111
2112
2112 if getattr(r, '_withsparseread', False):
2113 if getattr(r, '_withsparseread', False):
2113 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2114 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2114 benches.append(slicing)
2115 benches.append(slicing)
2115
2116
2116 benches.extend([
2117 benches.extend([
2117 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2118 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2118 (lambda: dodecompress(rawchunks), b'decompress'),
2119 (lambda: dodecompress(rawchunks), b'decompress'),
2119 (lambda: dopatch(text, bins), b'patch'),
2120 (lambda: dopatch(text, bins), b'patch'),
2120 (lambda: dohash(text), b'hash'),
2121 (lambda: dohash(text), b'hash'),
2121 ])
2122 ])
2122
2123
2123 timer, fm = gettimer(ui, opts)
2124 timer, fm = gettimer(ui, opts)
2124 for fn, title in benches:
2125 for fn, title in benches:
2125 timer(fn, title=title)
2126 timer(fn, title=title)
2126 fm.end()
2127 fm.end()
2127
2128
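
The six steps listed in the perfrevlogrevision docstring above culminate in applying binary patches to a base text. The following toy sketch shows that delta-chain idea with a made-up (start, end, replacement) patch format; it is illustrative only and does not use Mercurial's real mdiff encoding.

def apply_delta(base, delta):
    # Apply one (start, end, replacement) patch to a bytes object.
    start, end, data = delta
    return base[:start] + data + base[end:]

def rebuild(snapshot, chain):
    # Rebuild a fulltext from a base snapshot plus an ordered delta chain.
    text = snapshot
    for delta in chain:
        text = apply_delta(text, delta)
    return text

if __name__ == '__main__':
    base = b'line one\nline two\n'
    chain = [(9, 17, b'line 2'), (0, 8, b'line 1')]
    print(rebuild(base, chain))  # b'line 1\nline 2\n'
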
2128 @command(b'perfrevset',
2129 @command(b'perfrevset',
2129 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2130 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2130 (b'', b'contexts', False, b'obtain changectx for each revision')]
2131 (b'', b'contexts', False, b'obtain changectx for each revision')]
2131 + formatteropts, b"REVSET")
2132 + formatteropts, b"REVSET")
2132 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2133 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2133 """benchmark the execution time of a revset
2134 """benchmark the execution time of a revset
2134
2135
2135 Use the --clean option if need to evaluate the impact of build volatile
2136 Use the --clean option if need to evaluate the impact of build volatile
2136 revisions set cache on the revset execution. Volatile cache hold filtered
2137 revisions set cache on the revset execution. Volatile cache hold filtered
2137 and obsolete related cache."""
2138 and obsolete related cache."""
2138 opts = _byteskwargs(opts)
2139 opts = _byteskwargs(opts)
2139
2140
2140 timer, fm = gettimer(ui, opts)
2141 timer, fm = gettimer(ui, opts)
2141 def d():
2142 def d():
2142 if clear:
2143 if clear:
2143 repo.invalidatevolatilesets()
2144 repo.invalidatevolatilesets()
2144 if contexts:
2145 if contexts:
2145 for ctx in repo.set(expr): pass
2146 for ctx in repo.set(expr): pass
2146 else:
2147 else:
2147 for r in repo.revs(expr): pass
2148 for r in repo.revs(expr): pass
2148 timer(d)
2149 timer(d)
2149 fm.end()
2150 fm.end()
2150
2151
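
The --clear option documented above matters because a warm cache changes what the timer actually measures. A tiny standalone illustration of that effect (plain Python 3, hypothetical names, nothing Mercurial-specific):

import time

_cache = {}

def expensive(n, use_cache=True):
    # A stand-in for an expensive, cacheable computation.
    if use_cache and n in _cache:
        return _cache[n]
    total = sum(i * i for i in range(n))
    _cache[n] = total
    return total

def bench(use_cache):
    start = time.perf_counter()
    for _ in range(50):
        expensive(200000, use_cache=use_cache)
    return time.perf_counter() - start

if __name__ == '__main__':
    print('warm cache:', bench(True))
    _cache.clear()
    print('no caching:', bench(False))
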
2151 @command(b'perfvolatilesets',
2152 @command(b'perfvolatilesets',
2152 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2153 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2153 ] + formatteropts)
2154 ] + formatteropts)
2154 def perfvolatilesets(ui, repo, *names, **opts):
2155 def perfvolatilesets(ui, repo, *names, **opts):
2155 """benchmark the computation of various volatile set
2156 """benchmark the computation of various volatile set
2156
2157
2157 Volatile sets compute the elements related to filtering and obsolescence."""
2158 Volatile sets compute the elements related to filtering and obsolescence."""
2158 opts = _byteskwargs(opts)
2159 opts = _byteskwargs(opts)
2159 timer, fm = gettimer(ui, opts)
2160 timer, fm = gettimer(ui, opts)
2160 repo = repo.unfiltered()
2161 repo = repo.unfiltered()
2161
2162
2162 def getobs(name):
2163 def getobs(name):
2163 def d():
2164 def d():
2164 repo.invalidatevolatilesets()
2165 repo.invalidatevolatilesets()
2165 if opts[b'clear_obsstore']:
2166 if opts[b'clear_obsstore']:
2166 clearfilecache(repo, b'obsstore')
2167 clearfilecache(repo, b'obsstore')
2167 obsolete.getrevs(repo, name)
2168 obsolete.getrevs(repo, name)
2168 return d
2169 return d
2169
2170
2170 allobs = sorted(obsolete.cachefuncs)
2171 allobs = sorted(obsolete.cachefuncs)
2171 if names:
2172 if names:
2172 allobs = [n for n in allobs if n in names]
2173 allobs = [n for n in allobs if n in names]
2173
2174
2174 for name in allobs:
2175 for name in allobs:
2175 timer(getobs(name), title=name)
2176 timer(getobs(name), title=name)
2176
2177
2177 def getfiltered(name):
2178 def getfiltered(name):
2178 def d():
2179 def d():
2179 repo.invalidatevolatilesets()
2180 repo.invalidatevolatilesets()
2180 if opts[b'clear_obsstore']:
2181 if opts[b'clear_obsstore']:
2181 clearfilecache(repo, b'obsstore')
2182 clearfilecache(repo, b'obsstore')
2182 repoview.filterrevs(repo, name)
2183 repoview.filterrevs(repo, name)
2183 return d
2184 return d
2184
2185
2185 allfilter = sorted(repoview.filtertable)
2186 allfilter = sorted(repoview.filtertable)
2186 if names:
2187 if names:
2187 allfilter = [n for n in allfilter if n in names]
2188 allfilter = [n for n in allfilter if n in names]
2188
2189
2189 for name in allfilter:
2190 for name in allfilter:
2190 timer(getfiltered(name), title=name)
2191 timer(getfiltered(name), title=name)
2191 fm.end()
2192 fm.end()
2192
2193
2193 @command(b'perfbranchmap',
2194 @command(b'perfbranchmap',
2194 [(b'f', b'full', False,
2195 [(b'f', b'full', False,
2195 b'include the build time of subsets'),
2196 b'include the build time of subsets'),
2196 (b'', b'clear-revbranch', False,
2197 (b'', b'clear-revbranch', False,
2197 b'purge the revbranch cache between computation'),
2198 b'purge the revbranch cache between computation'),
2198 ] + formatteropts)
2199 ] + formatteropts)
2199 def perfbranchmap(ui, repo, *filternames, **opts):
2200 def perfbranchmap(ui, repo, *filternames, **opts):
2200 """benchmark the update of a branchmap
2201 """benchmark the update of a branchmap
2201
2202
2202 This benchmarks the full repo.branchmap() call with read and write disabled
2203 This benchmarks the full repo.branchmap() call with read and write disabled
2203 """
2204 """
2204 opts = _byteskwargs(opts)
2205 opts = _byteskwargs(opts)
2205 full = opts.get(b"full", False)
2206 full = opts.get(b"full", False)
2206 clear_revbranch = opts.get(b"clear_revbranch", False)
2207 clear_revbranch = opts.get(b"clear_revbranch", False)
2207 timer, fm = gettimer(ui, opts)
2208 timer, fm = gettimer(ui, opts)
2208 def getbranchmap(filtername):
2209 def getbranchmap(filtername):
2209 """generate a benchmark function for the filtername"""
2210 """generate a benchmark function for the filtername"""
2210 if filtername is None:
2211 if filtername is None:
2211 view = repo
2212 view = repo
2212 else:
2213 else:
2213 view = repo.filtered(filtername)
2214 view = repo.filtered(filtername)
2214 def d():
2215 def d():
2215 if clear_revbranch:
2216 if clear_revbranch:
2216 repo.revbranchcache()._clear()
2217 repo.revbranchcache()._clear()
2217 if full:
2218 if full:
2218 view._branchcaches.clear()
2219 view._branchcaches.clear()
2219 else:
2220 else:
2220 view._branchcaches.pop(filtername, None)
2221 view._branchcaches.pop(filtername, None)
2221 view.branchmap()
2222 view.branchmap()
2222 return d
2223 return d
2223 # add filter in smaller subset to bigger subset
2224 # add filter in smaller subset to bigger subset
2224 possiblefilters = set(repoview.filtertable)
2225 possiblefilters = set(repoview.filtertable)
2225 if filternames:
2226 if filternames:
2226 possiblefilters &= set(filternames)
2227 possiblefilters &= set(filternames)
2227 subsettable = getbranchmapsubsettable()
2228 subsettable = getbranchmapsubsettable()
2228 allfilters = []
2229 allfilters = []
2229 while possiblefilters:
2230 while possiblefilters:
2230 for name in possiblefilters:
2231 for name in possiblefilters:
2231 subset = subsettable.get(name)
2232 subset = subsettable.get(name)
2232 if subset not in possiblefilters:
2233 if subset not in possiblefilters:
2233 break
2234 break
2234 else:
2235 else:
2235 assert False, b'subset cycle %s!' % possiblefilters
2236 assert False, b'subset cycle %s!' % possiblefilters
2236 allfilters.append(name)
2237 allfilters.append(name)
2237 possiblefilters.remove(name)
2238 possiblefilters.remove(name)
2238
2239
2239 # warm the cache
2240 # warm the cache
2240 if not full:
2241 if not full:
2241 for name in allfilters:
2242 for name in allfilters:
2242 repo.filtered(name).branchmap()
2243 repo.filtered(name).branchmap()
2243 if not filternames or b'unfiltered' in filternames:
2244 if not filternames or b'unfiltered' in filternames:
2244 # add unfiltered
2245 # add unfiltered
2245 allfilters.append(None)
2246 allfilters.append(None)
2246
2247
2247 branchcacheread = safeattrsetter(branchmap, b'read')
2248 branchcacheread = safeattrsetter(branchmap, b'read')
2248 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2249 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2249 branchcacheread.set(lambda repo: None)
2250 branchcacheread.set(lambda repo: None)
2250 branchcachewrite.set(lambda bc, repo: None)
2251 branchcachewrite.set(lambda bc, repo: None)
2251 try:
2252 try:
2252 for name in allfilters:
2253 for name in allfilters:
2253 printname = name
2254 printname = name
2254 if name is None:
2255 if name is None:
2255 printname = b'unfiltered'
2256 printname = b'unfiltered'
2256 timer(getbranchmap(name), title=str(printname))
2257 timer(getbranchmap(name), title=str(printname))
2257 finally:
2258 finally:
2258 branchcacheread.restore()
2259 branchcacheread.restore()
2259 branchcachewrite.restore()
2260 branchcachewrite.restore()
2260 fm.end()
2261 fm.end()
2261
2262
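
perfbranchmap above temporarily replaces branchmap.read and branchcache.write so that on-disk caching does not pollute the measurement, then restores them in a finally block. The same save/replace/restore pattern, reduced to a generic helper (a hypothetical sketch, not Mercurial's safeattrsetter):

import contextlib

@contextlib.contextmanager
def patched(obj, name, replacement):
    # Swap an attribute for the duration of the block, then restore it.
    original = getattr(obj, name)
    setattr(obj, name, replacement)
    try:
        yield
    finally:
        setattr(obj, name, original)

if __name__ == '__main__':
    import json
    with patched(json, 'dumps', lambda *args, **kwargs: '"stubbed"'):
        print(json.dumps({'real': 'data'}))  # "stubbed"
    print(json.dumps({'real': 'data'}))      # {"real": "data"}
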
2262 @command(b'perfbranchmapload', [
2263 @command(b'perfbranchmapload', [
2263 (b'f', b'filter', b'', b'Specify repoview filter'),
2264 (b'f', b'filter', b'', b'Specify repoview filter'),
2264 (b'', b'list', False, b'List branchmap filter caches'),
2265 (b'', b'list', False, b'List branchmap filter caches'),
2265 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2266 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2266
2267
2267 ] + formatteropts)
2268 ] + formatteropts)
2268 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2269 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2269 """benchmark reading the branchmap"""
2270 """benchmark reading the branchmap"""
2270 opts = _byteskwargs(opts)
2271 opts = _byteskwargs(opts)
2271 clearrevlogs = opts[b'clear_revlogs']
2272 clearrevlogs = opts[b'clear_revlogs']
2272
2273
2273 if list:
2274 if list:
2274 for name, kind, st in repo.cachevfs.readdir(stat=True):
2275 for name, kind, st in repo.cachevfs.readdir(stat=True):
2275 if name.startswith(b'branch2'):
2276 if name.startswith(b'branch2'):
2276 filtername = name.partition(b'-')[2] or b'unfiltered'
2277 filtername = name.partition(b'-')[2] or b'unfiltered'
2277 ui.status(b'%s - %s\n'
2278 ui.status(b'%s - %s\n'
2278 % (filtername, util.bytecount(st.st_size)))
2279 % (filtername, util.bytecount(st.st_size)))
2279 return
2280 return
2280 if not filter:
2281 if not filter:
2281 filter = None
2282 filter = None
2282 subsettable = getbranchmapsubsettable()
2283 subsettable = getbranchmapsubsettable()
2283 if filter is None:
2284 if filter is None:
2284 repo = repo.unfiltered()
2285 repo = repo.unfiltered()
2285 else:
2286 else:
2286 repo = repoview.repoview(repo, filter)
2287 repo = repoview.repoview(repo, filter)
2287
2288
2288 repo.branchmap() # make sure we have a relevant, up to date branchmap
2289 repo.branchmap() # make sure we have a relevant, up to date branchmap
2289
2290
2290 currentfilter = filter
2291 currentfilter = filter
2291 # try once without timer, the filter may not be cached
2292 # try once without timer, the filter may not be cached
2292 while branchmap.read(repo) is None:
2293 while branchmap.read(repo) is None:
2293 currentfilter = subsettable.get(currentfilter)
2294 currentfilter = subsettable.get(currentfilter)
2294 if currentfilter is None:
2295 if currentfilter is None:
2295 raise error.Abort(b'No branchmap cached for %s repo'
2296 raise error.Abort(b'No branchmap cached for %s repo'
2296 % (filter or b'unfiltered'))
2297 % (filter or b'unfiltered'))
2297 repo = repo.filtered(currentfilter)
2298 repo = repo.filtered(currentfilter)
2298 timer, fm = gettimer(ui, opts)
2299 timer, fm = gettimer(ui, opts)
2299 def setup():
2300 def setup():
2300 if clearrevlogs:
2301 if clearrevlogs:
2301 clearchangelog(repo)
2302 clearchangelog(repo)
2302 def bench():
2303 def bench():
2303 branchmap.read(repo)
2304 branchmap.read(repo)
2304 timer(bench, setup=setup)
2305 timer(bench, setup=setup)
2305 fm.end()
2306 fm.end()
2306
2307
2307 @command(b'perfloadmarkers')
2308 @command(b'perfloadmarkers')
2308 def perfloadmarkers(ui, repo):
2309 def perfloadmarkers(ui, repo):
2309 """benchmark the time to parse the on-disk markers for a repo
2310 """benchmark the time to parse the on-disk markers for a repo
2310
2311
2311 Result is the number of markers in the repo."""
2312 Result is the number of markers in the repo."""
2312 timer, fm = gettimer(ui)
2313 timer, fm = gettimer(ui)
2313 svfs = getsvfs(repo)
2314 svfs = getsvfs(repo)
2314 timer(lambda: len(obsolete.obsstore(svfs)))
2315 timer(lambda: len(obsolete.obsstore(svfs)))
2315 fm.end()
2316 fm.end()
2316
2317
2317 @command(b'perflrucachedict', formatteropts +
2318 @command(b'perflrucachedict', formatteropts +
2318 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2319 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2319 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2320 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2320 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2321 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2321 (b'', b'size', 4, b'size of cache'),
2322 (b'', b'size', 4, b'size of cache'),
2322 (b'', b'gets', 10000, b'number of key lookups'),
2323 (b'', b'gets', 10000, b'number of key lookups'),
2323 (b'', b'sets', 10000, b'number of key sets'),
2324 (b'', b'sets', 10000, b'number of key sets'),
2324 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2325 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2325 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2326 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2326 norepo=True)
2327 norepo=True)
2327 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2328 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2328 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2329 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2329 opts = _byteskwargs(opts)
2330 opts = _byteskwargs(opts)
2330
2331
2331 def doinit():
2332 def doinit():
2332 for i in _xrange(10000):
2333 for i in _xrange(10000):
2333 util.lrucachedict(size)
2334 util.lrucachedict(size)
2334
2335
2335 costrange = list(range(mincost, maxcost + 1))
2336 costrange = list(range(mincost, maxcost + 1))
2336
2337
2337 values = []
2338 values = []
2338 for i in _xrange(size):
2339 for i in _xrange(size):
2339 values.append(random.randint(0, _maxint))
2340 values.append(random.randint(0, _maxint))
2340
2341
2341 # Get mode fills the cache and tests raw lookup performance with no
2342 # Get mode fills the cache and tests raw lookup performance with no
2342 # eviction.
2343 # eviction.
2343 getseq = []
2344 getseq = []
2344 for i in _xrange(gets):
2345 for i in _xrange(gets):
2345 getseq.append(random.choice(values))
2346 getseq.append(random.choice(values))
2346
2347
2347 def dogets():
2348 def dogets():
2348 d = util.lrucachedict(size)
2349 d = util.lrucachedict(size)
2349 for v in values:
2350 for v in values:
2350 d[v] = v
2351 d[v] = v
2351 for key in getseq:
2352 for key in getseq:
2352 value = d[key]
2353 value = d[key]
2353 value # silence pyflakes warning
2354 value # silence pyflakes warning
2354
2355
2355 def dogetscost():
2356 def dogetscost():
2356 d = util.lrucachedict(size, maxcost=costlimit)
2357 d = util.lrucachedict(size, maxcost=costlimit)
2357 for i, v in enumerate(values):
2358 for i, v in enumerate(values):
2358 d.insert(v, v, cost=costs[i])
2359 d.insert(v, v, cost=costs[i])
2359 for key in getseq:
2360 for key in getseq:
2360 try:
2361 try:
2361 value = d[key]
2362 value = d[key]
2362 value # silence pyflakes warning
2363 value # silence pyflakes warning
2363 except KeyError:
2364 except KeyError:
2364 pass
2365 pass
2365
2366
2366 # Set mode tests insertion speed with cache eviction.
2367 # Set mode tests insertion speed with cache eviction.
2367 setseq = []
2368 setseq = []
2368 costs = []
2369 costs = []
2369 for i in _xrange(sets):
2370 for i in _xrange(sets):
2370 setseq.append(random.randint(0, _maxint))
2371 setseq.append(random.randint(0, _maxint))
2371 costs.append(random.choice(costrange))
2372 costs.append(random.choice(costrange))
2372
2373
2373 def doinserts():
2374 def doinserts():
2374 d = util.lrucachedict(size)
2375 d = util.lrucachedict(size)
2375 for v in setseq:
2376 for v in setseq:
2376 d.insert(v, v)
2377 d.insert(v, v)
2377
2378
2378 def doinsertscost():
2379 def doinsertscost():
2379 d = util.lrucachedict(size, maxcost=costlimit)
2380 d = util.lrucachedict(size, maxcost=costlimit)
2380 for i, v in enumerate(setseq):
2381 for i, v in enumerate(setseq):
2381 d.insert(v, v, cost=costs[i])
2382 d.insert(v, v, cost=costs[i])
2382
2383
2383 def dosets():
2384 def dosets():
2384 d = util.lrucachedict(size)
2385 d = util.lrucachedict(size)
2385 for v in setseq:
2386 for v in setseq:
2386 d[v] = v
2387 d[v] = v
2387
2388
2388 # Mixed mode randomly performs gets and sets with eviction.
2389 # Mixed mode randomly performs gets and sets with eviction.
2389 mixedops = []
2390 mixedops = []
2390 for i in _xrange(mixed):
2391 for i in _xrange(mixed):
2391 r = random.randint(0, 100)
2392 r = random.randint(0, 100)
2392 if r < mixedgetfreq:
2393 if r < mixedgetfreq:
2393 op = 0
2394 op = 0
2394 else:
2395 else:
2395 op = 1
2396 op = 1
2396
2397
2397 mixedops.append((op,
2398 mixedops.append((op,
2398 random.randint(0, size * 2),
2399 random.randint(0, size * 2),
2399 random.choice(costrange)))
2400 random.choice(costrange)))
2400
2401
2401 def domixed():
2402 def domixed():
2402 d = util.lrucachedict(size)
2403 d = util.lrucachedict(size)
2403
2404
2404 for op, v, cost in mixedops:
2405 for op, v, cost in mixedops:
2405 if op == 0:
2406 if op == 0:
2406 try:
2407 try:
2407 d[v]
2408 d[v]
2408 except KeyError:
2409 except KeyError:
2409 pass
2410 pass
2410 else:
2411 else:
2411 d[v] = v
2412 d[v] = v
2412
2413
2413 def domixedcost():
2414 def domixedcost():
2414 d = util.lrucachedict(size, maxcost=costlimit)
2415 d = util.lrucachedict(size, maxcost=costlimit)
2415
2416
2416 for op, v, cost in mixedops:
2417 for op, v, cost in mixedops:
2417 if op == 0:
2418 if op == 0:
2418 try:
2419 try:
2419 d[v]
2420 d[v]
2420 except KeyError:
2421 except KeyError:
2421 pass
2422 pass
2422 else:
2423 else:
2423 d.insert(v, v, cost=cost)
2424 d.insert(v, v, cost=cost)
2424
2425
2425 benches = [
2426 benches = [
2426 (doinit, b'init'),
2427 (doinit, b'init'),
2427 ]
2428 ]
2428
2429
2429 if costlimit:
2430 if costlimit:
2430 benches.extend([
2431 benches.extend([
2431 (dogetscost, b'gets w/ cost limit'),
2432 (dogetscost, b'gets w/ cost limit'),
2432 (doinsertscost, b'inserts w/ cost limit'),
2433 (doinsertscost, b'inserts w/ cost limit'),
2433 (domixedcost, b'mixed w/ cost limit'),
2434 (domixedcost, b'mixed w/ cost limit'),
2434 ])
2435 ])
2435 else:
2436 else:
2436 benches.extend([
2437 benches.extend([
2437 (dogets, b'gets'),
2438 (dogets, b'gets'),
2438 (doinserts, b'inserts'),
2439 (doinserts, b'inserts'),
2439 (dosets, b'sets'),
2440 (dosets, b'sets'),
2440 (domixed, b'mixed')
2441 (domixed, b'mixed')
2441 ])
2442 ])
2442
2443
2443 for fn, title in benches:
2444 for fn, title in benches:
2444 timer, fm = gettimer(ui, opts)
2445 timer, fm = gettimer(ui, opts)
2445 timer(fn, title=title)
2446 timer(fn, title=title)
2446 fm.end()
2447 fm.end()
2447
2448
2448 @command(b'perfwrite', formatteropts)
2449 @command(b'perfwrite', formatteropts)
2449 def perfwrite(ui, repo, **opts):
2450 def perfwrite(ui, repo, **opts):
2450 """microbenchmark ui.write
2451 """microbenchmark ui.write
2451 """
2452 """
2452 opts = _byteskwargs(opts)
2453 opts = _byteskwargs(opts)
2453
2454
2454 timer, fm = gettimer(ui, opts)
2455 timer, fm = gettimer(ui, opts)
2455 def write():
2456 def write():
2456 for i in range(100000):
2457 for i in range(100000):
2457 ui.write((b'Testing write performance\n'))
2458 ui.write((b'Testing write performance\n'))
2458 timer(write)
2459 timer(write)
2459 fm.end()
2460 fm.end()
2460
2461
2461 def uisetup(ui):
2462 def uisetup(ui):
2462 if (util.safehasattr(cmdutil, b'openrevlog') and
2463 if (util.safehasattr(cmdutil, b'openrevlog') and
2463 not util.safehasattr(commands, b'debugrevlogopts')):
2464 not util.safehasattr(commands, b'debugrevlogopts')):
2464 # for "historical portability":
2465 # for "historical portability":
2465 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2466 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2466 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2467 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2467 # openrevlog() should cause failure, because it has been
2468 # openrevlog() should cause failure, because it has been
2468 # available since 3.5 (or 49c583ca48c4).
2469 # available since 3.5 (or 49c583ca48c4).
2469 def openrevlog(orig, repo, cmd, file_, opts):
2470 def openrevlog(orig, repo, cmd, file_, opts):
2470 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2471 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2471 raise error.Abort(b"This version doesn't support --dir option",
2472 raise error.Abort(b"This version doesn't support --dir option",
2472 hint=b"use 3.5 or later")
2473 hint=b"use 3.5 or later")
2473 return orig(repo, cmd, file_, opts)
2474 return orig(repo, cmd, file_, opts)
2474 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2475 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
@@ -1,287 +1,287 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perfstatusext=$CONTRIBDIR/perf.py
35 > perfstatusext=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help perfstatusext
41 $ hg help perfstatusext
42 perfstatusext extension - helper extension to measure performance
42 perfstatusext extension - helper extension to measure performance
43
43
44 list of commands:
44 list of commands:
45
45
46 perfaddremove
46 perfaddremove
47 (no help text available)
47 (no help text available)
48 perfancestors
48 perfancestors
49 (no help text available)
49 (no help text available)
50 perfancestorset
50 perfancestorset
51 (no help text available)
51 (no help text available)
52 perfannotate (no help text available)
52 perfannotate (no help text available)
53 perfbdiff benchmark a bdiff between revisions
53 perfbdiff benchmark a bdiff between revisions
54 perfbookmarks
54 perfbookmarks
55 benchmark parsing bookmarks from disk to memory
55 benchmark parsing bookmarks from disk to memory
56 perfbranchmap
56 perfbranchmap
57 benchmark the update of a branchmap
57 benchmark the update of a branchmap
58 perfbranchmapload
58 perfbranchmapload
59 benchmark reading the branchmap
59 benchmark reading the branchmap
60 perfbundleread
60 perfbundleread
61 Benchmark reading of bundle files.
61 Benchmark reading of bundle files.
62 perfcca (no help text available)
62 perfcca (no help text available)
63 perfchangegroupchangelog
63 perfchangegroupchangelog
64 Benchmark producing a changelog group for a changegroup.
64 Benchmark producing a changelog group for a changegroup.
65 perfchangeset
65 perfchangeset
66 (no help text available)
66 (no help text available)
67 perfctxfiles (no help text available)
67 perfctxfiles (no help text available)
68 perfdiffwd Profile diff of working directory changes
68 perfdiffwd Profile diff of working directory changes
69 perfdirfoldmap
69 perfdirfoldmap
70 (no help text available)
70 (no help text available)
71 perfdirs (no help text available)
71 perfdirs (no help text available)
72 perfdirstate (no help text available)
72 perfdirstate (no help text available)
73 perfdirstatedirs
73 perfdirstatedirs
74 (no help text available)
74 (no help text available)
75 perfdirstatefoldmap
75 perfdirstatefoldmap
76 (no help text available)
76 (no help text available)
77 perfdirstatewrite
77 perfdirstatewrite
78 (no help text available)
78 (no help text available)
79 perffncacheencode
79 perffncacheencode
80 (no help text available)
80 (no help text available)
81 perffncacheload
81 perffncacheload
82 (no help text available)
82 (no help text available)
83 perffncachewrite
83 perffncachewrite
84 (no help text available)
84 (no help text available)
85 perfheads (no help text available)
85 perfheads (no help text available)
86 perfhelper-tracecopies
86 perfhelper-tracecopies
87 find statistic about potential parameters for the
87 find statistic about potential parameters for the
88 'perftracecopies'
88 'perftracecopies'
89 perfindex (no help text available)
89 perfindex (no help text available)
90 perflinelogedits
90 perflinelogedits
91 (no help text available)
91 (no help text available)
92 perfloadmarkers
92 perfloadmarkers
93 benchmark the time to parse the on-disk markers for a repo
93 benchmark the time to parse the on-disk markers for a repo
94 perflog (no help text available)
94 perflog (no help text available)
95 perflookup (no help text available)
95 perflookup (no help text available)
96 perflrucachedict
96 perflrucachedict
97 (no help text available)
97 (no help text available)
98 perfmanifest benchmark the time to read a manifest from disk and return a
98 perfmanifest benchmark the time to read a manifest from disk and return a
99 usable
99 usable
100 perfmergecalculate
100 perfmergecalculate
101 (no help text available)
101 (no help text available)
102 perfmoonwalk benchmark walking the changelog backwards
102 perfmoonwalk benchmark walking the changelog backwards
103 perfnodelookup
103 perfnodelookup
104 (no help text available)
104 (no help text available)
105 perfparents (no help text available)
105 perfparents (no help text available)
106 perfpathcopies
106 perfpathcopies
107 (no help text available)
107 benchmark the copy tracing logic
108 perfphases benchmark phasesets computation
108 perfphases benchmark phasesets computation
109 perfphasesremote
109 perfphasesremote
110 benchmark time needed to analyse phases of the remote server
110 benchmark time needed to analyse phases of the remote server
111 perfrawfiles (no help text available)
111 perfrawfiles (no help text available)
112 perfrevlogchunks
112 perfrevlogchunks
113 Benchmark operations on revlog chunks.
113 Benchmark operations on revlog chunks.
114 perfrevlogindex
114 perfrevlogindex
115 Benchmark operations against a revlog index.
115 Benchmark operations against a revlog index.
116 perfrevlogrevision
116 perfrevlogrevision
117 Benchmark obtaining a revlog revision.
117 Benchmark obtaining a revlog revision.
118 perfrevlogrevisions
118 perfrevlogrevisions
119 Benchmark reading a series of revisions from a revlog.
119 Benchmark reading a series of revisions from a revlog.
120 perfrevlogwrite
120 perfrevlogwrite
121 Benchmark writing a series of revisions to a revlog.
121 Benchmark writing a series of revisions to a revlog.
122 perfrevrange (no help text available)
122 perfrevrange (no help text available)
123 perfrevset benchmark the execution time of a revset
123 perfrevset benchmark the execution time of a revset
124 perfstartup (no help text available)
124 perfstartup (no help text available)
125 perfstatus (no help text available)
125 perfstatus (no help text available)
126 perftags (no help text available)
126 perftags (no help text available)
127 perftemplating
127 perftemplating
128 test the rendering time of a given template
128 test the rendering time of a given template
129 perfunidiff benchmark a unified diff between revisions
129 perfunidiff benchmark a unified diff between revisions
130 perfvolatilesets
130 perfvolatilesets
131 benchmark the computation of various volatile set
131 benchmark the computation of various volatile set
132 perfwalk (no help text available)
132 perfwalk (no help text available)
133 perfwrite microbenchmark ui.write
133 perfwrite microbenchmark ui.write
134
134
135 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
135 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
136 $ hg perfaddremove
136 $ hg perfaddremove
137 $ hg perfancestors
137 $ hg perfancestors
138 $ hg perfancestorset 2
138 $ hg perfancestorset 2
139 $ hg perfannotate a
139 $ hg perfannotate a
140 $ hg perfbdiff -c 1
140 $ hg perfbdiff -c 1
141 $ hg perfbdiff --alldata 1
141 $ hg perfbdiff --alldata 1
142 $ hg perfunidiff -c 1
142 $ hg perfunidiff -c 1
143 $ hg perfunidiff --alldata 1
143 $ hg perfunidiff --alldata 1
144 $ hg perfbookmarks
144 $ hg perfbookmarks
145 $ hg perfbranchmap
145 $ hg perfbranchmap
146 $ hg perfbranchmapload
146 $ hg perfbranchmapload
147 $ hg perfcca
147 $ hg perfcca
148 $ hg perfchangegroupchangelog
148 $ hg perfchangegroupchangelog
149 $ hg perfchangegroupchangelog --cgversion 01
149 $ hg perfchangegroupchangelog --cgversion 01
150 $ hg perfchangeset 2
150 $ hg perfchangeset 2
151 $ hg perfctxfiles 2
151 $ hg perfctxfiles 2
152 $ hg perfdiffwd
152 $ hg perfdiffwd
153 $ hg perfdirfoldmap
153 $ hg perfdirfoldmap
154 $ hg perfdirs
154 $ hg perfdirs
155 $ hg perfdirstate
155 $ hg perfdirstate
156 $ hg perfdirstatedirs
156 $ hg perfdirstatedirs
157 $ hg perfdirstatefoldmap
157 $ hg perfdirstatefoldmap
158 $ hg perfdirstatewrite
158 $ hg perfdirstatewrite
159 #if repofncache
159 #if repofncache
160 $ hg perffncacheencode
160 $ hg perffncacheencode
161 $ hg perffncacheload
161 $ hg perffncacheload
162 $ hg debugrebuildfncache
162 $ hg debugrebuildfncache
163 fncache already up to date
163 fncache already up to date
164 $ hg perffncachewrite
164 $ hg perffncachewrite
165 $ hg debugrebuildfncache
165 $ hg debugrebuildfncache
166 fncache already up to date
166 fncache already up to date
167 #endif
167 #endif
168 $ hg perfheads
168 $ hg perfheads
169 $ hg perfindex
169 $ hg perfindex
170 $ hg perflinelogedits -n 1
170 $ hg perflinelogedits -n 1
171 $ hg perfloadmarkers
171 $ hg perfloadmarkers
172 $ hg perflog
172 $ hg perflog
173 $ hg perflookup 2
173 $ hg perflookup 2
174 $ hg perflrucache
174 $ hg perflrucache
175 $ hg perfmanifest 2
175 $ hg perfmanifest 2
176 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
176 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
177 $ hg perfmanifest -m 44fe2c8352bb
177 $ hg perfmanifest -m 44fe2c8352bb
178 abort: manifest revision must be integer or full node
178 abort: manifest revision must be integer or full node
179 [255]
179 [255]
180 $ hg perfmergecalculate -r 3
180 $ hg perfmergecalculate -r 3
181 $ hg perfmoonwalk
181 $ hg perfmoonwalk
182 $ hg perfnodelookup 2
182 $ hg perfnodelookup 2
183 $ hg perfpathcopies 1 2
183 $ hg perfpathcopies 1 2
184 $ hg perfrawfiles 2
184 $ hg perfrawfiles 2
185 $ hg perfrevlogindex -c
185 $ hg perfrevlogindex -c
186 #if reporevlogstore
186 #if reporevlogstore
187 $ hg perfrevlogrevisions .hg/store/data/a.i
187 $ hg perfrevlogrevisions .hg/store/data/a.i
188 #endif
188 #endif
189 $ hg perfrevlogrevision -m 0
189 $ hg perfrevlogrevision -m 0
190 $ hg perfrevlogchunks -c
190 $ hg perfrevlogchunks -c
191 $ hg perfrevrange
191 $ hg perfrevrange
192 $ hg perfrevset 'all()'
192 $ hg perfrevset 'all()'
193 $ hg perfstartup
193 $ hg perfstartup
194 $ hg perfstatus
194 $ hg perfstatus
195 $ hg perftags
195 $ hg perftags
196 $ hg perftemplating
196 $ hg perftemplating
197 $ hg perfvolatilesets
197 $ hg perfvolatilesets
198 $ hg perfwalk
198 $ hg perfwalk
199 $ hg perfparents
199 $ hg perfparents
200
200
201 test actual output
201 test actual output
202 ------------------
202 ------------------
203
203
204 normal output:
204 normal output:
205
205
206 $ hg perfheads --config perf.stub=no
206 $ hg perfheads --config perf.stub=no
207 ! wall * comb * user * sys * (best of *) (glob)
207 ! wall * comb * user * sys * (best of *) (glob)
208
208
209 detailed output:
209 detailed output:
210
210
211 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
211 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
212 ! wall * comb * user * sys * (best of *) (glob)
212 ! wall * comb * user * sys * (best of *) (glob)
213 ! wall * comb * user * sys * (max of *) (glob)
213 ! wall * comb * user * sys * (max of *) (glob)
214 ! wall * comb * user * sys * (avg of *) (glob)
214 ! wall * comb * user * sys * (avg of *) (glob)
215 ! wall * comb * user * sys * (median of *) (glob)
215 ! wall * comb * user * sys * (median of *) (glob)
216
216
217 test json output
217 test json output
218 ----------------
218 ----------------
219
219
220 normal output:
220 normal output:
221
221
222 $ hg perfheads --template json --config perf.stub=no
222 $ hg perfheads --template json --config perf.stub=no
223 [
223 [
224 {
224 {
225 "comb": *, (glob)
225 "comb": *, (glob)
226 "count": *, (glob)
226 "count": *, (glob)
227 "sys": *, (glob)
227 "sys": *, (glob)
228 "user": *, (glob)
228 "user": *, (glob)
229 "wall": * (glob)
229 "wall": * (glob)
230 }
230 }
231 ]
231 ]
232
232
233 detailed output:
233 detailed output:
234
234
235 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
235 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
236 [
236 [
237 {
237 {
238 "avg.comb": *, (glob)
238 "avg.comb": *, (glob)
239 "avg.count": *, (glob)
239 "avg.count": *, (glob)
240 "avg.sys": *, (glob)
240 "avg.sys": *, (glob)
241 "avg.user": *, (glob)
241 "avg.user": *, (glob)
242 "avg.wall": *, (glob)
242 "avg.wall": *, (glob)
243 "comb": *, (glob)
243 "comb": *, (glob)
244 "count": *, (glob)
244 "count": *, (glob)
245 "max.comb": *, (glob)
245 "max.comb": *, (glob)
246 "max.count": *, (glob)
246 "max.count": *, (glob)
247 "max.sys": *, (glob)
247 "max.sys": *, (glob)
248 "max.user": *, (glob)
248 "max.user": *, (glob)
249 "max.wall": *, (glob)
249 "max.wall": *, (glob)
250 "median.comb": *, (glob)
250 "median.comb": *, (glob)
251 "median.count": *, (glob)
251 "median.count": *, (glob)
252 "median.sys": *, (glob)
252 "median.sys": *, (glob)
253 "median.user": *, (glob)
253 "median.user": *, (glob)
254 "median.wall": *, (glob)
254 "median.wall": *, (glob)
255 "sys": *, (glob)
255 "sys": *, (glob)
256 "user": *, (glob)
256 "user": *, (glob)
257 "wall": * (glob)
257 "wall": * (glob)
258 }
258 }
259 ]
259 ]
260
260
261 Check perf.py for historical portability
261 Check perf.py for historical portability
262 ----------------------------------------
262 ----------------------------------------
263
263
264 $ cd "$TESTDIR/.."
264 $ cd "$TESTDIR/.."
265
265
266 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
266 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
267 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
267 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
268 > "$TESTDIR"/check-perf-code.py contrib/perf.py
268 > "$TESTDIR"/check-perf-code.py contrib/perf.py
269 contrib/perf.py:\d+: (re)
269 contrib/perf.py:\d+: (re)
270 > from mercurial import (
270 > from mercurial import (
271 import newer module separately in try clause for early Mercurial
271 import newer module separately in try clause for early Mercurial
272 contrib/perf.py:\d+: (re)
272 contrib/perf.py:\d+: (re)
273 > from mercurial import (
273 > from mercurial import (
274 import newer module separately in try clause for early Mercurial
274 import newer module separately in try clause for early Mercurial
275 contrib/perf.py:\d+: (re)
275 contrib/perf.py:\d+: (re)
276 > origindexpath = orig.opener.join(orig.indexfile)
276 > origindexpath = orig.opener.join(orig.indexfile)
277 use getvfs()/getsvfs() for early Mercurial
277 use getvfs()/getsvfs() for early Mercurial
278 contrib/perf.py:\d+: (re)
278 contrib/perf.py:\d+: (re)
279 > origdatapath = orig.opener.join(orig.datafile)
279 > origdatapath = orig.opener.join(orig.datafile)
280 use getvfs()/getsvfs() for early Mercurial
280 use getvfs()/getsvfs() for early Mercurial
281 contrib/perf.py:\d+: (re)
281 contrib/perf.py:\d+: (re)
282 > vfs = vfsmod.vfs(tmpdir)
282 > vfs = vfsmod.vfs(tmpdir)
283 use getvfs()/getsvfs() for early Mercurial
283 use getvfs()/getsvfs() for early Mercurial
284 contrib/perf.py:\d+: (re)
284 contrib/perf.py:\d+: (re)
285 > vfs.options = getattr(orig.opener, 'options', None)
285 > vfs.options = getattr(orig.opener, 'options', None)
286 use getvfs()/getsvfs() for early Mercurial
286 use getvfs()/getsvfs() for early Mercurial
287 [1]
287 [1]