perf: fix a minor typo in perfbranchmapload
Boris Feld
r40734:3c98c339 default
@@ -1,2420 +1,2420 @@
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass

def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        (b'c', b'changelog', False, (b'open changelog')),
        (b'm', b'manifest', False, (b'open manifest')),
        (b'', b'dir', False, (b'open directory manifest')),
    ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm

def stub_timer(fm, func, setup=None, title=None):
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))

def _timer(fm, func, setup=None, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)

# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()

# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perftags', formatteropts)
def perftags(ui, repo, **opts):
    import mercurial.changelog
    import mercurial.manifest

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def s():
        repo.changelog = mercurial.changelog.changelog(svfs)
        rootmanifest = mercurial.manifest.manifestrevlog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
                                                          rootmanifest)
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfbookmarks', formatteropts)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def s():
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'version', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(version, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()

@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
901 nonpublishroots = 0
901 nonpublishroots = 0
902 for nhex, phase in remotephases.iteritems():
902 for nhex, phase in remotephases.iteritems():
903 if nhex == b'publishing': # ignore data related to publish option
903 if nhex == b'publishing': # ignore data related to publish option
904 continue
904 continue
905 node = bin(nhex)
905 node = bin(nhex)
906 if node in nodemap and int(phase):
906 if node in nodemap and int(phase):
907 nonpublishroots += 1
907 nonpublishroots += 1
908 ui.status((b'number of roots: %d\n') % len(remotephases))
908 ui.status((b'number of roots: %d\n') % len(remotephases))
909 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
909 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
910 def d():
910 def d():
911 phases.remotephasessummary(repo,
911 phases.remotephasessummary(repo,
912 remotesubset,
912 remotesubset,
913 remotephases)
913 remotephases)
914 timer(d)
914 timer(d)
915 fm.end()
915 fm.end()
916
916
@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
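    # Illustrative invocations (hypothetical revisions; the flags are the ones
    # declared in the decorator above):
    #   $ hg perfmanifest tip
    #   $ hg perfmanifest --clear-disk --manifest-rev 0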
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command(b'perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    n = repo[b"tip"].node()
    svfs = getsvfs(repo)
    def d():
        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
        cl.rev(n)
    timer(d)
    fm.end()

@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
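    # Illustrative configuration for shrinking the sample on small repositories
    # (read via getint() just below; the value here is made up):
    #   [perf]
    #   parentscount = 100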
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
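    # Illustrative invocation (walks from tip down to rev 0, so runtime scales
    # with repository size):
    #   $ hg perfmoonwalk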
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
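    # Illustrative invocations (the positional template argument is optional;
    # without it the default template built below is used):
    #   $ hg perftemplating -r 'all()'
    #   $ hg perftemplating -r 'last(all(), 1000)' '{rev} {desc|firstline}\n'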
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()

@command(b'perfhelper-tracecopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
         ])
def perfhelpertracecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for the `perftracecopies` command

    This command finds source-destination pairs relevant for copy-tracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.
    """
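    # Illustrative invocation (the revset is hypothetical; only merge
    # revisions inside it are inspected below):
    #   $ hg perfhelper-tracecopies -r 'public()'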
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    header = '%12s %12s %12s %12s\n'
    output = ("%(source)12s %(destination)12s "
              "%(nbrevs)12d %(nbmissingfiles)12d\n")
    fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                fm.startitem()
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)
    fm.end()

@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()

def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
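    # Illustrative invocations (flags as declared above; --xdiff requires
    # --blocks, and the revision numbers are hypothetical):
    #   $ hg perfbdiff -c 1000 --count 100
    #   $ hg perfbdiff --alldata --blocks --xdiff 1000
    #   $ hg perfbdiff -m 500 --count 50 --threads 4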
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
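    # Illustrative invocations (same argument conventions as perfbdiff above;
    # the revision numbers are hypothetical):
    #   $ hg perfunidiff -c 1000 --count 20
    #   $ hg perfunidiff --alldata 1000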
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
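    # Illustrative invocation (times `hg diff` of the working directory with
    # the whitespace option combinations set up below):
    #   $ hg perfdiffwd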
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()

@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """
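    # Illustrative invocations (benchmark the changelog or manifest index, or
    # the filelog index of a tracked file; the path is hypothetical):
    #   $ hg perfrevlogindex -c
    #   $ hg perfrevlogindex -m
    #   $ hg perfrevlogindex path/to/file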

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
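    # Illustrative invocations (read every 10th manifest revision, or the
    # changelog in reverse):
    #   $ hg perfrevlogrevisions -m --dist 10
    #   $ hg perfrevlogrevisions -c --reverse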
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfrevlogwrite', revlogopts + formatteropts +
         [(b's', b'startrev', 1000, b'revision to start writing at'),
          (b'', b'stoprev', -1, b'last revision to write'),
          (b'', b'count', 3, b'number of passes to perform'),
          (b'', b'details', False, b'print timing for every revision tested'),
          (b'', b'source', b'full', b'the kind of data fed into the revlog'),
          (b'', b'lazydeltabase', True, b'try the provided delta first'),
         ],
         b'-c|-m|FILE')
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas
    """
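    # Illustrative invocations (rebuild the tail of a revlog from different
    # delta sources; the start revision is hypothetical):
    #   $ hg perfrevlogwrite -m --source parent-smallest
    #   $ hg perfrevlogwrite -c --startrev 10000 --source storage --count 5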
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
                   b'storage')
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
                               lazydeltabase=lazydeltabase)
        allresults.append(timing)

    ### consolidate the results in a single list
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many floats will not be very precise, we ignore this
    # fact for now
    totaltime = []
    for item in allresults:
        totaltime.append((sum(x[1][0] for x in item),
                          sum(x[1][1] for x in item),
                          sum(x[1][2] for x in item),)
        )
    formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
              displayall=displayall)
    fm.end()

class _faketr(object):
    def add(s, x, y, z=None):
        return None

def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
                  lazydeltabase=True):
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        for idx, rev in enumerate(revs):
            ui.progress(topic, idx, unit='revs', total=total)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        ui.progress(topic, total, unit='revs', total=total)
        ui.progress(topic, None, unit='revs', total=total)
    return timings

def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return ((text, tr, linkrev, p1, p2),
            {'node': node, 'flags': flags, 'cachedelta': cachedelta})

1791 @contextlib.contextmanager
1791 @contextlib.contextmanager
1792 def _temprevlog(ui, orig, truncaterev):
1792 def _temprevlog(ui, orig, truncaterev):
1793 from mercurial import vfs as vfsmod
1793 from mercurial import vfs as vfsmod
1794
1794
1795 if orig._inline:
1795 if orig._inline:
1796 raise error.Abort('not supporting inline revlog (yet)')
1796 raise error.Abort('not supporting inline revlog (yet)')
1797
1797
1798 origindexpath = orig.opener.join(orig.indexfile)
1798 origindexpath = orig.opener.join(orig.indexfile)
1799 origdatapath = orig.opener.join(orig.datafile)
1799 origdatapath = orig.opener.join(orig.datafile)
1800 indexname = 'revlog.i'
1800 indexname = 'revlog.i'
1801 dataname = 'revlog.d'
1801 dataname = 'revlog.d'
1802
1802
1803 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1803 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1804 try:
1804 try:
1805 # copy the data file in a temporary directory
1805 # copy the data file in a temporary directory
1806 ui.debug('copying data in %s\n' % tmpdir)
1806 ui.debug('copying data in %s\n' % tmpdir)
1807 destindexpath = os.path.join(tmpdir, 'revlog.i')
1807 destindexpath = os.path.join(tmpdir, 'revlog.i')
1808 destdatapath = os.path.join(tmpdir, 'revlog.d')
1808 destdatapath = os.path.join(tmpdir, 'revlog.d')
1809 shutil.copyfile(origindexpath, destindexpath)
1809 shutil.copyfile(origindexpath, destindexpath)
1810 shutil.copyfile(origdatapath, destdatapath)
1810 shutil.copyfile(origdatapath, destdatapath)
1811
1811
1812 # remove the data we want to add again
1812 # remove the data we want to add again
1813 ui.debug('truncating data to be rewritten\n')
1813 ui.debug('truncating data to be rewritten\n')
1814 with open(destindexpath, 'ab') as index:
1814 with open(destindexpath, 'ab') as index:
1815 index.seek(0)
1815 index.seek(0)
1816 index.truncate(truncaterev * orig._io.size)
1816 index.truncate(truncaterev * orig._io.size)
1817 with open(destdatapath, 'ab') as data:
1817 with open(destdatapath, 'ab') as data:
1818 data.seek(0)
1818 data.seek(0)
1819 data.truncate(orig.start(truncaterev))
1819 data.truncate(orig.start(truncaterev))
1820
1820
1821 # instantiate a new revlog from the temporary copy
1821 # instantiate a new revlog from the temporary copy
1822 ui.debug('instantiating revlog from the temporary copy\n')
1822 ui.debug('instantiating revlog from the temporary copy\n')
1823 vfs = vfsmod.vfs(tmpdir)
1823 vfs = vfsmod.vfs(tmpdir)
1824 vfs.options = getattr(orig.opener, 'options', None)
1824 vfs.options = getattr(orig.opener, 'options', None)
1825
1825
1826 dest = revlog.revlog(vfs,
1826 dest = revlog.revlog(vfs,
1827 indexfile=indexname,
1827 indexfile=indexname,
1828 datafile=dataname)
1828 datafile=dataname)
1829 if dest._inline:
1829 if dest._inline:
1830 raise error.Abort('not supporting inline revlog (yet)')
1830 raise error.Abort('not supporting inline revlog (yet)')
1831 # make sure internals are initialized
1831 # make sure internals are initialized
1832 dest.revision(len(dest) - 1)
1832 dest.revision(len(dest) - 1)
1833 yield dest
1833 yield dest
1834 del dest, vfs
1834 del dest, vfs
1835 finally:
1835 finally:
1836 shutil.rmtree(tmpdir, True)
1836 shutil.rmtree(tmpdir, True)
1837
1837
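# _temprevlog() above works by copying the revlog files aside and chopping off
# the tail that the caller wants to re-add. A standalone sketch of that
# copy-then-truncate pattern on a plain file (the paths and sizes are assumed
# examples, not anything perf.py itself uses):
import os, shutil, tempfile

def _truncated_copy(srcpath, keepbytes):
    tmpdir = tempfile.mkdtemp(prefix='tmp-sketch-')
    dst = os.path.join(tmpdir, os.path.basename(srcpath))
    shutil.copyfile(srcpath, dst)        # copy the original data aside
    with open(dst, 'ab') as fp:          # append mode keeps the existing bytes
        fp.seek(0)
        fp.truncate(keepbytes)           # drop everything past the cut point
    return tmpdir, dst
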
1838 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1838 @command(b'perfrevlogchunks', revlogopts + formatteropts +
1839 [(b'e', b'engines', b'', b'compression engines to use'),
1839 [(b'e', b'engines', b'', b'compression engines to use'),
1840 (b's', b'startrev', 0, b'revision to start at')],
1840 (b's', b'startrev', 0, b'revision to start at')],
1841 b'-c|-m|FILE')
1841 b'-c|-m|FILE')
1842 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1842 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1843 """Benchmark operations on revlog chunks.
1843 """Benchmark operations on revlog chunks.
1844
1844
1845 Logically, each revlog is a collection of fulltext revisions. However,
1845 Logically, each revlog is a collection of fulltext revisions. However,
1846 stored within each revlog are "chunks" of possibly compressed data. This
1846 stored within each revlog are "chunks" of possibly compressed data. This
1847 data needs to be read and decompressed or compressed and written.
1847 data needs to be read and decompressed or compressed and written.
1848
1848
1849 This command measures the time it takes to read+decompress and recompress
1849 This command measures the time it takes to read+decompress and recompress
1850 chunks in a revlog. It effectively isolates I/O and compression performance.
1850 chunks in a revlog. It effectively isolates I/O and compression performance.
1851 For measurements of higher-level operations like resolving revisions,
1851 For measurements of higher-level operations like resolving revisions,
1852 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1852 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1853 """
1853 """
1854 opts = _byteskwargs(opts)
1854 opts = _byteskwargs(opts)
1855
1855
1856 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1856 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
1857
1857
1858 # _chunkraw was renamed to _getsegmentforrevs.
1858 # _chunkraw was renamed to _getsegmentforrevs.
1859 try:
1859 try:
1860 segmentforrevs = rl._getsegmentforrevs
1860 segmentforrevs = rl._getsegmentforrevs
1861 except AttributeError:
1861 except AttributeError:
1862 segmentforrevs = rl._chunkraw
1862 segmentforrevs = rl._chunkraw
1863
1863
1864 # Verify engines argument.
1864 # Verify engines argument.
1865 if engines:
1865 if engines:
1866 engines = set(e.strip() for e in engines.split(b','))
1866 engines = set(e.strip() for e in engines.split(b','))
1867 for engine in engines:
1867 for engine in engines:
1868 try:
1868 try:
1869 util.compressionengines[engine]
1869 util.compressionengines[engine]
1870 except KeyError:
1870 except KeyError:
1871 raise error.Abort(b'unknown compression engine: %s' % engine)
1871 raise error.Abort(b'unknown compression engine: %s' % engine)
1872 else:
1872 else:
1873 engines = []
1873 engines = []
1874 for e in util.compengines:
1874 for e in util.compengines:
1875 engine = util.compengines[e]
1875 engine = util.compengines[e]
1876 try:
1876 try:
1877 if engine.available():
1877 if engine.available():
1878 engine.revlogcompressor().compress(b'dummy')
1878 engine.revlogcompressor().compress(b'dummy')
1879 engines.append(e)
1879 engines.append(e)
1880 except NotImplementedError:
1880 except NotImplementedError:
1881 pass
1881 pass
1882
1882
1883 revs = list(rl.revs(startrev, len(rl) - 1))
1883 revs = list(rl.revs(startrev, len(rl) - 1))
1884
1884
1885 def rlfh(rl):
1885 def rlfh(rl):
1886 if rl._inline:
1886 if rl._inline:
1887 return getsvfs(repo)(rl.indexfile)
1887 return getsvfs(repo)(rl.indexfile)
1888 else:
1888 else:
1889 return getsvfs(repo)(rl.datafile)
1889 return getsvfs(repo)(rl.datafile)
1890
1890
1891 def doread():
1891 def doread():
1892 rl.clearcaches()
1892 rl.clearcaches()
1893 for rev in revs:
1893 for rev in revs:
1894 segmentforrevs(rev, rev)
1894 segmentforrevs(rev, rev)
1895
1895
1896 def doreadcachedfh():
1896 def doreadcachedfh():
1897 rl.clearcaches()
1897 rl.clearcaches()
1898 fh = rlfh(rl)
1898 fh = rlfh(rl)
1899 for rev in revs:
1899 for rev in revs:
1900 segmentforrevs(rev, rev, df=fh)
1900 segmentforrevs(rev, rev, df=fh)
1901
1901
1902 def doreadbatch():
1902 def doreadbatch():
1903 rl.clearcaches()
1903 rl.clearcaches()
1904 segmentforrevs(revs[0], revs[-1])
1904 segmentforrevs(revs[0], revs[-1])
1905
1905
1906 def doreadbatchcachedfh():
1906 def doreadbatchcachedfh():
1907 rl.clearcaches()
1907 rl.clearcaches()
1908 fh = rlfh(rl)
1908 fh = rlfh(rl)
1909 segmentforrevs(revs[0], revs[-1], df=fh)
1909 segmentforrevs(revs[0], revs[-1], df=fh)
1910
1910
1911 def dochunk():
1911 def dochunk():
1912 rl.clearcaches()
1912 rl.clearcaches()
1913 fh = rlfh(rl)
1913 fh = rlfh(rl)
1914 for rev in revs:
1914 for rev in revs:
1915 rl._chunk(rev, df=fh)
1915 rl._chunk(rev, df=fh)
1916
1916
1917 chunks = [None]
1917 chunks = [None]
1918
1918
1919 def dochunkbatch():
1919 def dochunkbatch():
1920 rl.clearcaches()
1920 rl.clearcaches()
1921 fh = rlfh(rl)
1921 fh = rlfh(rl)
1922 # Save chunks as a side-effect.
1922 # Save chunks as a side-effect.
1923 chunks[0] = rl._chunks(revs, df=fh)
1923 chunks[0] = rl._chunks(revs, df=fh)
1924
1924
1925 def docompress(compressor):
1925 def docompress(compressor):
1926 rl.clearcaches()
1926 rl.clearcaches()
1927
1927
1928 try:
1928 try:
1929 # Swap in the requested compression engine.
1929 # Swap in the requested compression engine.
1930 oldcompressor = rl._compressor
1930 oldcompressor = rl._compressor
1931 rl._compressor = compressor
1931 rl._compressor = compressor
1932 for chunk in chunks[0]:
1932 for chunk in chunks[0]:
1933 rl.compress(chunk)
1933 rl.compress(chunk)
1934 finally:
1934 finally:
1935 rl._compressor = oldcompressor
1935 rl._compressor = oldcompressor
1936
1936
1937 benches = [
1937 benches = [
1938 (lambda: doread(), b'read'),
1938 (lambda: doread(), b'read'),
1939 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1939 (lambda: doreadcachedfh(), b'read w/ reused fd'),
1940 (lambda: doreadbatch(), b'read batch'),
1940 (lambda: doreadbatch(), b'read batch'),
1941 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1941 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
1942 (lambda: dochunk(), b'chunk'),
1942 (lambda: dochunk(), b'chunk'),
1943 (lambda: dochunkbatch(), b'chunk batch'),
1943 (lambda: dochunkbatch(), b'chunk batch'),
1944 ]
1944 ]
1945
1945
1946 for engine in sorted(engines):
1946 for engine in sorted(engines):
1947 compressor = util.compengines[engine].revlogcompressor()
1947 compressor = util.compengines[engine].revlogcompressor()
1948 benches.append((functools.partial(docompress, compressor),
1948 benches.append((functools.partial(docompress, compressor),
1949 b'compress w/ %s' % engine))
1949 b'compress w/ %s' % engine))
1950
1950
1951 for fn, title in benches:
1951 for fn, title in benches:
1952 timer, fm = gettimer(ui, opts)
1952 timer, fm = gettimer(ui, opts)
1953 timer(fn, title=title)
1953 timer(fn, title=title)
1954 fm.end()
1954 fm.end()
1955
1955
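# A hedged, standalone version of the engine-discovery loop used above: probe
# every compression engine Mercurial knows about and keep those that can
# produce a revlog compressor (assumes the 'mercurial' package is importable;
# the b'dummy' payload is only a smoke test, as in the command itself).
from mercurial import util as _hgutil

def _usable_revlog_engines(payload=b'dummy'):
    usable = []
    for name in _hgutil.compengines:
        engine = _hgutil.compengines[name]
        try:
            if engine.available():
                engine.revlogcompressor().compress(payload)
                usable.append(name)
        except NotImplementedError:
            pass                         # engine lacks a revlog compressor
    return usable
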
1956 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1956 @command(b'perfrevlogrevision', revlogopts + formatteropts +
1957 [(b'', b'cache', False, b'use caches instead of clearing')],
1957 [(b'', b'cache', False, b'use caches instead of clearing')],
1958 b'-c|-m|FILE REV')
1958 b'-c|-m|FILE REV')
1959 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1959 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1960 """Benchmark obtaining a revlog revision.
1960 """Benchmark obtaining a revlog revision.
1961
1961
1962 Obtaining a revlog revision consists of roughly the following steps:
1962 Obtaining a revlog revision consists of roughly the following steps:
1963
1963
1964 1. Compute the delta chain
1964 1. Compute the delta chain
1965 2. Slice the delta chain if applicable
1965 2. Slice the delta chain if applicable
1966 3. Obtain the raw chunks for that delta chain
1966 3. Obtain the raw chunks for that delta chain
1967 4. Decompress each raw chunk
1967 4. Decompress each raw chunk
1968 5. Apply binary patches to obtain fulltext
1968 5. Apply binary patches to obtain fulltext
1969 6. Verify hash of fulltext
1969 6. Verify hash of fulltext
1970
1970
1971 This command measures the time spent in each of these phases.
1971 This command measures the time spent in each of these phases.
1972 """
1972 """
1973 opts = _byteskwargs(opts)
1973 opts = _byteskwargs(opts)
1974
1974
1975 if opts.get(b'changelog') or opts.get(b'manifest'):
1975 if opts.get(b'changelog') or opts.get(b'manifest'):
1976 file_, rev = None, file_
1976 file_, rev = None, file_
1977 elif rev is None:
1977 elif rev is None:
1978 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1978 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
1979
1979
1980 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1980 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
1981
1981
1982 # _chunkraw was renamed to _getsegmentforrevs.
1982 # _chunkraw was renamed to _getsegmentforrevs.
1983 try:
1983 try:
1984 segmentforrevs = r._getsegmentforrevs
1984 segmentforrevs = r._getsegmentforrevs
1985 except AttributeError:
1985 except AttributeError:
1986 segmentforrevs = r._chunkraw
1986 segmentforrevs = r._chunkraw
1987
1987
1988 node = r.lookup(rev)
1988 node = r.lookup(rev)
1989 rev = r.rev(node)
1989 rev = r.rev(node)
1990
1990
1991 def getrawchunks(data, chain):
1991 def getrawchunks(data, chain):
1992 start = r.start
1992 start = r.start
1993 length = r.length
1993 length = r.length
1994 inline = r._inline
1994 inline = r._inline
1995 iosize = r._io.size
1995 iosize = r._io.size
1996 buffer = util.buffer
1996 buffer = util.buffer
1997
1997
1998 chunks = []
1998 chunks = []
1999 ladd = chunks.append
1999 ladd = chunks.append
2000 for idx, item in enumerate(chain):
2000 for idx, item in enumerate(chain):
2001 offset = start(item[0])
2001 offset = start(item[0])
2002 bits = data[idx]
2002 bits = data[idx]
2003 for rev in item:
2003 for rev in item:
2004 chunkstart = start(rev)
2004 chunkstart = start(rev)
2005 if inline:
2005 if inline:
2006 chunkstart += (rev + 1) * iosize
2006 chunkstart += (rev + 1) * iosize
2007 chunklength = length(rev)
2007 chunklength = length(rev)
2008 ladd(buffer(bits, chunkstart - offset, chunklength))
2008 ladd(buffer(bits, chunkstart - offset, chunklength))
2009
2009
2010 return chunks
2010 return chunks
2011
2011
2012 def dodeltachain(rev):
2012 def dodeltachain(rev):
2013 if not cache:
2013 if not cache:
2014 r.clearcaches()
2014 r.clearcaches()
2015 r._deltachain(rev)
2015 r._deltachain(rev)
2016
2016
2017 def doread(chain):
2017 def doread(chain):
2018 if not cache:
2018 if not cache:
2019 r.clearcaches()
2019 r.clearcaches()
2020 for item in slicedchain:
2020 for item in slicedchain:
2021 segmentforrevs(item[0], item[-1])
2021 segmentforrevs(item[0], item[-1])
2022
2022
2023 def doslice(r, chain, size):
2023 def doslice(r, chain, size):
2024 for s in slicechunk(r, chain, targetsize=size):
2024 for s in slicechunk(r, chain, targetsize=size):
2025 pass
2025 pass
2026
2026
2027 def dorawchunks(data, chain):
2027 def dorawchunks(data, chain):
2028 if not cache:
2028 if not cache:
2029 r.clearcaches()
2029 r.clearcaches()
2030 getrawchunks(data, chain)
2030 getrawchunks(data, chain)
2031
2031
2032 def dodecompress(chunks):
2032 def dodecompress(chunks):
2033 decomp = r.decompress
2033 decomp = r.decompress
2034 for chunk in chunks:
2034 for chunk in chunks:
2035 decomp(chunk)
2035 decomp(chunk)
2036
2036
2037 def dopatch(text, bins):
2037 def dopatch(text, bins):
2038 if not cache:
2038 if not cache:
2039 r.clearcaches()
2039 r.clearcaches()
2040 mdiff.patches(text, bins)
2040 mdiff.patches(text, bins)
2041
2041
2042 def dohash(text):
2042 def dohash(text):
2043 if not cache:
2043 if not cache:
2044 r.clearcaches()
2044 r.clearcaches()
2045 r.checkhash(text, node, rev=rev)
2045 r.checkhash(text, node, rev=rev)
2046
2046
2047 def dorevision():
2047 def dorevision():
2048 if not cache:
2048 if not cache:
2049 r.clearcaches()
2049 r.clearcaches()
2050 r.revision(node)
2050 r.revision(node)
2051
2051
2052 try:
2052 try:
2053 from mercurial.revlogutils.deltas import slicechunk
2053 from mercurial.revlogutils.deltas import slicechunk
2054 except ImportError:
2054 except ImportError:
2055 slicechunk = getattr(revlog, '_slicechunk', None)
2055 slicechunk = getattr(revlog, '_slicechunk', None)
2056
2056
2057 size = r.length(rev)
2057 size = r.length(rev)
2058 chain = r._deltachain(rev)[0]
2058 chain = r._deltachain(rev)[0]
2059 if not getattr(r, '_withsparseread', False):
2059 if not getattr(r, '_withsparseread', False):
2060 slicedchain = (chain,)
2060 slicedchain = (chain,)
2061 else:
2061 else:
2062 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2062 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2063 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2063 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2064 rawchunks = getrawchunks(data, slicedchain)
2064 rawchunks = getrawchunks(data, slicedchain)
2065 bins = r._chunks(chain)
2065 bins = r._chunks(chain)
2066 text = bytes(bins[0])
2066 text = bytes(bins[0])
2067 bins = bins[1:]
2067 bins = bins[1:]
2068 text = mdiff.patches(text, bins)
2068 text = mdiff.patches(text, bins)
2069
2069
2070 benches = [
2070 benches = [
2071 (lambda: dorevision(), b'full'),
2071 (lambda: dorevision(), b'full'),
2072 (lambda: dodeltachain(rev), b'deltachain'),
2072 (lambda: dodeltachain(rev), b'deltachain'),
2073 (lambda: doread(chain), b'read'),
2073 (lambda: doread(chain), b'read'),
2074 ]
2074 ]
2075
2075
2076 if getattr(r, '_withsparseread', False):
2076 if getattr(r, '_withsparseread', False):
2077 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2077 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2078 benches.append(slicing)
2078 benches.append(slicing)
2079
2079
2080 benches.extend([
2080 benches.extend([
2081 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2081 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2082 (lambda: dodecompress(rawchunks), b'decompress'),
2082 (lambda: dodecompress(rawchunks), b'decompress'),
2083 (lambda: dopatch(text, bins), b'patch'),
2083 (lambda: dopatch(text, bins), b'patch'),
2084 (lambda: dohash(text), b'hash'),
2084 (lambda: dohash(text), b'hash'),
2085 ])
2085 ])
2086
2086
2087 timer, fm = gettimer(ui, opts)
2087 timer, fm = gettimer(ui, opts)
2088 for fn, title in benches:
2088 for fn, title in benches:
2089 timer(fn, title=title)
2089 timer(fn, title=title)
2090 fm.end()
2090 fm.end()
2091
2091
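# Steps 5 and 6 of the docstring above, pulled out into a hedged standalone
# sketch: apply a chain of binary deltas to a base text, then verify the
# result. The plain SHA-1 check merely stands in for revlog's checkhash();
# the base text, deltas and expected digest are assumed inputs.
import hashlib
from mercurial import mdiff as _mdiff

def _rebuild_and_check(base, deltas, expected_sha1):
    text = _mdiff.patches(base, list(deltas))            # step 5: apply patches
    if hashlib.sha1(text).hexdigest() != expected_sha1:  # step 6: verify result
        raise ValueError('rebuilt fulltext does not match expected digest')
    return text
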
2092 @command(b'perfrevset',
2092 @command(b'perfrevset',
2093 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2093 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2094 (b'', b'contexts', False, b'obtain changectx for each revision')]
2094 (b'', b'contexts', False, b'obtain changectx for each revision')]
2095 + formatteropts, b"REVSET")
2095 + formatteropts, b"REVSET")
2096 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2096 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2097 """benchmark the execution time of a revset
2097 """benchmark the execution time of a revset
2098
2098
2099 Use the --clear option if you need to evaluate the impact of rebuilding the
2099 Use the --clear option if you need to evaluate the impact of rebuilding the
2100 volatile revision set caches on revset execution. The volatile caches hold
2100 volatile revision set caches on revset execution. The volatile caches hold
2101 filtered and obsolescence related data."""
2101 filtered and obsolescence related data."""
2102 opts = _byteskwargs(opts)
2102 opts = _byteskwargs(opts)
2103
2103
2104 timer, fm = gettimer(ui, opts)
2104 timer, fm = gettimer(ui, opts)
2105 def d():
2105 def d():
2106 if clear:
2106 if clear:
2107 repo.invalidatevolatilesets()
2107 repo.invalidatevolatilesets()
2108 if contexts:
2108 if contexts:
2109 for ctx in repo.set(expr): pass
2109 for ctx in repo.set(expr): pass
2110 else:
2110 else:
2111 for r in repo.revs(expr): pass
2111 for r in repo.revs(expr): pass
2112 timer(d)
2112 timer(d)
2113 fm.end()
2113 fm.end()
2114
2114
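# A hedged illustration of what --contexts toggles above: repo.revs() yields
# bare revision numbers, while repo.set() builds a changectx per revision,
# which is the extra work this benchmark can expose. The repository path and
# revset below are assumed examples.
from mercurial import hg as _hg, ui as _uimod

def _count_both(path=b'.', expr=b'all()'):
    repo = _hg.repository(_uimod.ui.load(), path)
    nrevs = sum(1 for _ in repo.revs(expr))    # cheap: integers only
    nctxs = sum(1 for _ in repo.set(expr))     # heavier: changectx objects
    return nrevs, nctxs
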
2115 @command(b'perfvolatilesets',
2115 @command(b'perfvolatilesets',
2116 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2116 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2117 ] + formatteropts)
2117 ] + formatteropts)
2118 def perfvolatilesets(ui, repo, *names, **opts):
2118 def perfvolatilesets(ui, repo, *names, **opts):
2119 """benchmark the computation of various volatile sets
2119 """benchmark the computation of various volatile sets
2120
2120
2121 Volatile sets compute elements related to filtering and obsolescence."""
2121 Volatile sets compute elements related to filtering and obsolescence."""
2122 opts = _byteskwargs(opts)
2122 opts = _byteskwargs(opts)
2123 timer, fm = gettimer(ui, opts)
2123 timer, fm = gettimer(ui, opts)
2124 repo = repo.unfiltered()
2124 repo = repo.unfiltered()
2125
2125
2126 def getobs(name):
2126 def getobs(name):
2127 def d():
2127 def d():
2128 repo.invalidatevolatilesets()
2128 repo.invalidatevolatilesets()
2129 if opts[b'clear_obsstore']:
2129 if opts[b'clear_obsstore']:
2130 clearfilecache(repo, b'obsstore')
2130 clearfilecache(repo, b'obsstore')
2131 obsolete.getrevs(repo, name)
2131 obsolete.getrevs(repo, name)
2132 return d
2132 return d
2133
2133
2134 allobs = sorted(obsolete.cachefuncs)
2134 allobs = sorted(obsolete.cachefuncs)
2135 if names:
2135 if names:
2136 allobs = [n for n in allobs if n in names]
2136 allobs = [n for n in allobs if n in names]
2137
2137
2138 for name in allobs:
2138 for name in allobs:
2139 timer(getobs(name), title=name)
2139 timer(getobs(name), title=name)
2140
2140
2141 def getfiltered(name):
2141 def getfiltered(name):
2142 def d():
2142 def d():
2143 repo.invalidatevolatilesets()
2143 repo.invalidatevolatilesets()
2144 if opts[b'clear_obsstore']:
2144 if opts[b'clear_obsstore']:
2145 clearfilecache(repo, b'obsstore')
2145 clearfilecache(repo, b'obsstore')
2146 repoview.filterrevs(repo, name)
2146 repoview.filterrevs(repo, name)
2147 return d
2147 return d
2148
2148
2149 allfilter = sorted(repoview.filtertable)
2149 allfilter = sorted(repoview.filtertable)
2150 if names:
2150 if names:
2151 allfilter = [n for n in allfilter if n in names]
2151 allfilter = [n for n in allfilter if n in names]
2152
2152
2153 for name in allfilter:
2153 for name in allfilter:
2154 timer(getfiltered(name), title=name)
2154 timer(getfiltered(name), title=name)
2155 fm.end()
2155 fm.end()
2156
2156
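# The command above iterates over two name tables: the obsolescence-related
# sets in obsolete.cachefuncs and the repoview filters in repoview.filtertable.
# A hedged helper that simply lists them (assumes Mercurial is importable; the
# names mentioned in the comments are typical values, not guaranteed ones):
from mercurial import obsolete as _obsolete, repoview as _repoview

def _volatile_set_names():
    return {
        'obsolete-sets': sorted(_obsolete.cachefuncs),   # e.g. obsolete, ...
        'repo-filters': sorted(_repoview.filtertable),   # e.g. visible, served, ...
    }
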
2157 @command(b'perfbranchmap',
2157 @command(b'perfbranchmap',
2158 [(b'f', b'full', False,
2158 [(b'f', b'full', False,
2159 b'Includes build time of subset'),
2159 b'Includes build time of subset'),
2160 (b'', b'clear-revbranch', False,
2160 (b'', b'clear-revbranch', False,
2161 b'purge the revbranch cache between computation'),
2161 b'purge the revbranch cache between computation'),
2162 ] + formatteropts)
2162 ] + formatteropts)
2163 def perfbranchmap(ui, repo, *filternames, **opts):
2163 def perfbranchmap(ui, repo, *filternames, **opts):
2164 """benchmark the update of a branchmap
2164 """benchmark the update of a branchmap
2165
2165
2166 This benchmarks the full repo.branchmap() call with cache reading and writing disabled.
2166 This benchmarks the full repo.branchmap() call with cache reading and writing disabled.
2167 """
2167 """
2168 opts = _byteskwargs(opts)
2168 opts = _byteskwargs(opts)
2169 full = opts.get(b"full", False)
2169 full = opts.get(b"full", False)
2170 clear_revbranch = opts.get(b"clear_revbranch", False)
2170 clear_revbranch = opts.get(b"clear_revbranch", False)
2171 timer, fm = gettimer(ui, opts)
2171 timer, fm = gettimer(ui, opts)
2172 def getbranchmap(filtername):
2172 def getbranchmap(filtername):
2173 """generate a benchmark function for the filtername"""
2173 """generate a benchmark function for the filtername"""
2174 if filtername is None:
2174 if filtername is None:
2175 view = repo
2175 view = repo
2176 else:
2176 else:
2177 view = repo.filtered(filtername)
2177 view = repo.filtered(filtername)
2178 def d():
2178 def d():
2179 if clear_revbranch:
2179 if clear_revbranch:
2180 repo.revbranchcache()._clear()
2180 repo.revbranchcache()._clear()
2181 if full:
2181 if full:
2182 view._branchcaches.clear()
2182 view._branchcaches.clear()
2183 else:
2183 else:
2184 view._branchcaches.pop(filtername, None)
2184 view._branchcaches.pop(filtername, None)
2185 view.branchmap()
2185 view.branchmap()
2186 return d
2186 return d
2187 # add filter in smaller subset to bigger subset
2187 # add filter in smaller subset to bigger subset
2188 possiblefilters = set(repoview.filtertable)
2188 possiblefilters = set(repoview.filtertable)
2189 if filternames:
2189 if filternames:
2190 possiblefilters &= set(filternames)
2190 possiblefilters &= set(filternames)
2191 subsettable = getbranchmapsubsettable()
2191 subsettable = getbranchmapsubsettable()
2192 allfilters = []
2192 allfilters = []
2193 while possiblefilters:
2193 while possiblefilters:
2194 for name in possiblefilters:
2194 for name in possiblefilters:
2195 subset = subsettable.get(name)
2195 subset = subsettable.get(name)
2196 if subset not in possiblefilters:
2196 if subset not in possiblefilters:
2197 break
2197 break
2198 else:
2198 else:
2199 assert False, b'subset cycle %s!' % possiblefilters
2199 assert False, b'subset cycle %s!' % possiblefilters
2200 allfilters.append(name)
2200 allfilters.append(name)
2201 possiblefilters.remove(name)
2201 possiblefilters.remove(name)
2202
2202
2203 # warm the cache
2203 # warm the cache
2204 if not full:
2204 if not full:
2205 for name in allfilters:
2205 for name in allfilters:
2206 repo.filtered(name).branchmap()
2206 repo.filtered(name).branchmap()
2207 if not filternames or b'unfiltered' in filternames:
2207 if not filternames or b'unfiltered' in filternames:
2208 # add unfiltered
2208 # add unfiltered
2209 allfilters.append(None)
2209 allfilters.append(None)
2210
2210
2211 branchcacheread = safeattrsetter(branchmap, b'read')
2211 branchcacheread = safeattrsetter(branchmap, b'read')
2212 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2212 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2213 branchcacheread.set(lambda repo: None)
2213 branchcacheread.set(lambda repo: None)
2214 branchcachewrite.set(lambda bc, repo: None)
2214 branchcachewrite.set(lambda bc, repo: None)
2215 try:
2215 try:
2216 for name in allfilters:
2216 for name in allfilters:
2217 printname = name
2217 printname = name
2218 if name is None:
2218 if name is None:
2219 printname = b'unfiltered'
2219 printname = b'unfiltered'
2220 timer(getbranchmap(name), title=str(printname))
2220 timer(getbranchmap(name), title=str(printname))
2221 finally:
2221 finally:
2222 branchcacheread.restore()
2222 branchcacheread.restore()
2223 branchcachewrite.restore()
2223 branchcachewrite.restore()
2224 fm.end()
2224 fm.end()
2225
2225
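# A standalone sketch of the ordering loop above: a filter is emitted only
# once the filter it builds on (its subset) has been emitted, so caches are
# warmed from the smallest view up to the largest. The toy 'subsettable' in
# the usage comment is made up for illustration.
def _order_filters(names, subsettable):
    remaining, ordered = set(names), []
    while remaining:
        for name in remaining:
            if subsettable.get(name) not in remaining:
                break                      # its subset has already been handled
        else:
            raise AssertionError('subset cycle: %r' % remaining)
        ordered.append(name)
        remaining.remove(name)
    return ordered

# _order_filters(['visible', 'served', 'base'],
#                {'visible': 'served', 'served': 'base'})
# would return ['base', 'served', 'visible'].
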
2226 @command(b'perfbranchmapload', [
2226 @command(b'perfbranchmapload', [
2227 (b'f', b'filter', b'', b'Specify repoview filter'),
2227 (b'f', b'filter', b'', b'Specify repoview filter'),
2228 (b'', b'list', False, b'List branchmap filter caches'),
2228 (b'', b'list', False, b'List branchmap filter caches'),
2229 ] + formatteropts)
2229 ] + formatteropts)
2230 def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
2230 def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
2231 """benchmark reading the branchmap"""
2231 """benchmark reading the branchmap"""
2232 opts = _byteskwargs(opts)
2232 opts = _byteskwargs(opts)
2233
2233
2234 if list:
2234 if list:
2235 for name, kind, st in repo.cachevfs.readdir(stat=True):
2235 for name, kind, st in repo.cachevfs.readdir(stat=True):
2236 if name.startswith(b'branch2'):
2236 if name.startswith(b'branch2'):
2237 filtername = name.partition(b'-')[2] or b'unfiltered'
2237 filtername = name.partition(b'-')[2] or b'unfiltered'
2238 ui.status(b'%s - %s\n'
2238 ui.status(b'%s - %s\n'
2239 % (filtername, util.bytecount(st.st_size)))
2239 % (filtername, util.bytecount(st.st_size)))
2240 return
2240 return
2241 if filter:
2241 if filter:
2242 repo = repoview.repoview(repo, filter)
2242 repo = repoview.repoview(repo, filter)
2243 else:
2243 else:
2244 repo = repo.unfiltered()
2244 repo = repo.unfiltered()
2245 # try once without timer, the filter may not be cached
2245 # try once without timer, the filter may not be cached
2246 if branchmap.read(repo) is None:
2246 if branchmap.read(repo) is None:
2247 raise error.Abort(b'No brachmap cached for %s repo'
2247 raise error.Abort(b'No branchmap cached for %s repo'
2248 % (filter or b'unfiltered'))
2248 % (filter or b'unfiltered'))
2249 timer, fm = gettimer(ui, opts)
2249 timer, fm = gettimer(ui, opts)
2250 timer(lambda: branchmap.read(repo) and None)
2250 timer(lambda: branchmap.read(repo) and None)
2251 fm.end()
2251 fm.end()
2252
2252
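# The --list branch above scans the repository's cache directory for files
# whose names start with 'branch2'. A hedged standalone equivalent using only
# the standard library (the '.hg/cache' location is an assumption made for
# this sketch, mirroring what cachevfs points at):
import os

def _list_branchmap_caches(repopath='.'):
    cachedir = os.path.join(repopath, '.hg', 'cache')
    out = []
    for name in sorted(os.listdir(cachedir)):
        if name.startswith('branch2'):
            filtername = name.partition('-')[2] or 'unfiltered'
            size = os.path.getsize(os.path.join(cachedir, name))
            out.append((filtername, size))
    return out
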
2253 @command(b'perfloadmarkers')
2253 @command(b'perfloadmarkers')
2254 def perfloadmarkers(ui, repo):
2254 def perfloadmarkers(ui, repo):
2255 """benchmark the time to parse the on-disk markers for a repo
2255 """benchmark the time to parse the on-disk markers for a repo
2256
2256
2257 Result is the number of markers in the repo."""
2257 Result is the number of markers in the repo."""
2258 timer, fm = gettimer(ui)
2258 timer, fm = gettimer(ui)
2259 svfs = getsvfs(repo)
2259 svfs = getsvfs(repo)
2260 timer(lambda: len(obsolete.obsstore(svfs)))
2260 timer(lambda: len(obsolete.obsstore(svfs)))
2261 fm.end()
2261 fm.end()
2262
2262
2263 @command(b'perflrucachedict', formatteropts +
2263 @command(b'perflrucachedict', formatteropts +
2264 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2264 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2265 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2265 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2266 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2266 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2267 (b'', b'size', 4, b'size of cache'),
2267 (b'', b'size', 4, b'size of cache'),
2268 (b'', b'gets', 10000, b'number of key lookups'),
2268 (b'', b'gets', 10000, b'number of key lookups'),
2269 (b'', b'sets', 10000, b'number of key sets'),
2269 (b'', b'sets', 10000, b'number of key sets'),
2270 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2270 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2271 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2271 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2272 norepo=True)
2272 norepo=True)
2273 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2273 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2274 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2274 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2275 opts = _byteskwargs(opts)
2275 opts = _byteskwargs(opts)
2276
2276
2277 def doinit():
2277 def doinit():
2278 for i in _xrange(10000):
2278 for i in _xrange(10000):
2279 util.lrucachedict(size)
2279 util.lrucachedict(size)
2280
2280
2281 costrange = list(range(mincost, maxcost + 1))
2281 costrange = list(range(mincost, maxcost + 1))
2282
2282
2283 values = []
2283 values = []
2284 for i in _xrange(size):
2284 for i in _xrange(size):
2285 values.append(random.randint(0, _maxint))
2285 values.append(random.randint(0, _maxint))
2286
2286
2287 # Get mode fills the cache and tests raw lookup performance with no
2287 # Get mode fills the cache and tests raw lookup performance with no
2288 # eviction.
2288 # eviction.
2289 getseq = []
2289 getseq = []
2290 for i in _xrange(gets):
2290 for i in _xrange(gets):
2291 getseq.append(random.choice(values))
2291 getseq.append(random.choice(values))
2292
2292
2293 def dogets():
2293 def dogets():
2294 d = util.lrucachedict(size)
2294 d = util.lrucachedict(size)
2295 for v in values:
2295 for v in values:
2296 d[v] = v
2296 d[v] = v
2297 for key in getseq:
2297 for key in getseq:
2298 value = d[key]
2298 value = d[key]
2299 value # silence pyflakes warning
2299 value # silence pyflakes warning
2300
2300
2301 def dogetscost():
2301 def dogetscost():
2302 d = util.lrucachedict(size, maxcost=costlimit)
2302 d = util.lrucachedict(size, maxcost=costlimit)
2303 for i, v in enumerate(values):
2303 for i, v in enumerate(values):
2304 d.insert(v, v, cost=costs[i])
2304 d.insert(v, v, cost=costs[i])
2305 for key in getseq:
2305 for key in getseq:
2306 try:
2306 try:
2307 value = d[key]
2307 value = d[key]
2308 value # silence pyflakes warning
2308 value # silence pyflakes warning
2309 except KeyError:
2309 except KeyError:
2310 pass
2310 pass
2311
2311
2312 # Set mode tests insertion speed with cache eviction.
2312 # Set mode tests insertion speed with cache eviction.
2313 setseq = []
2313 setseq = []
2314 costs = []
2314 costs = []
2315 for i in _xrange(sets):
2315 for i in _xrange(sets):
2316 setseq.append(random.randint(0, _maxint))
2316 setseq.append(random.randint(0, _maxint))
2317 costs.append(random.choice(costrange))
2317 costs.append(random.choice(costrange))
2318
2318
2319 def doinserts():
2319 def doinserts():
2320 d = util.lrucachedict(size)
2320 d = util.lrucachedict(size)
2321 for v in setseq:
2321 for v in setseq:
2322 d.insert(v, v)
2322 d.insert(v, v)
2323
2323
2324 def doinsertscost():
2324 def doinsertscost():
2325 d = util.lrucachedict(size, maxcost=costlimit)
2325 d = util.lrucachedict(size, maxcost=costlimit)
2326 for i, v in enumerate(setseq):
2326 for i, v in enumerate(setseq):
2327 d.insert(v, v, cost=costs[i])
2327 d.insert(v, v, cost=costs[i])
2328
2328
2329 def dosets():
2329 def dosets():
2330 d = util.lrucachedict(size)
2330 d = util.lrucachedict(size)
2331 for v in setseq:
2331 for v in setseq:
2332 d[v] = v
2332 d[v] = v
2333
2333
2334 # Mixed mode randomly performs gets and sets with eviction.
2334 # Mixed mode randomly performs gets and sets with eviction.
2335 mixedops = []
2335 mixedops = []
2336 for i in _xrange(mixed):
2336 for i in _xrange(mixed):
2337 r = random.randint(0, 100)
2337 r = random.randint(0, 100)
2338 if r < mixedgetfreq:
2338 if r < mixedgetfreq:
2339 op = 0
2339 op = 0
2340 else:
2340 else:
2341 op = 1
2341 op = 1
2342
2342
2343 mixedops.append((op,
2343 mixedops.append((op,
2344 random.randint(0, size * 2),
2344 random.randint(0, size * 2),
2345 random.choice(costrange)))
2345 random.choice(costrange)))
2346
2346
2347 def domixed():
2347 def domixed():
2348 d = util.lrucachedict(size)
2348 d = util.lrucachedict(size)
2349
2349
2350 for op, v, cost in mixedops:
2350 for op, v, cost in mixedops:
2351 if op == 0:
2351 if op == 0:
2352 try:
2352 try:
2353 d[v]
2353 d[v]
2354 except KeyError:
2354 except KeyError:
2355 pass
2355 pass
2356 else:
2356 else:
2357 d[v] = v
2357 d[v] = v
2358
2358
2359 def domixedcost():
2359 def domixedcost():
2360 d = util.lrucachedict(size, maxcost=costlimit)
2360 d = util.lrucachedict(size, maxcost=costlimit)
2361
2361
2362 for op, v, cost in mixedops:
2362 for op, v, cost in mixedops:
2363 if op == 0:
2363 if op == 0:
2364 try:
2364 try:
2365 d[v]
2365 d[v]
2366 except KeyError:
2366 except KeyError:
2367 pass
2367 pass
2368 else:
2368 else:
2369 d.insert(v, v, cost=cost)
2369 d.insert(v, v, cost=cost)
2370
2370
2371 benches = [
2371 benches = [
2372 (doinit, b'init'),
2372 (doinit, b'init'),
2373 ]
2373 ]
2374
2374
2375 if costlimit:
2375 if costlimit:
2376 benches.extend([
2376 benches.extend([
2377 (dogetscost, b'gets w/ cost limit'),
2377 (dogetscost, b'gets w/ cost limit'),
2378 (doinsertscost, b'inserts w/ cost limit'),
2378 (doinsertscost, b'inserts w/ cost limit'),
2379 (domixedcost, b'mixed w/ cost limit'),
2379 (domixedcost, b'mixed w/ cost limit'),
2380 ])
2380 ])
2381 else:
2381 else:
2382 benches.extend([
2382 benches.extend([
2383 (dogets, b'gets'),
2383 (dogets, b'gets'),
2384 (doinserts, b'inserts'),
2384 (doinserts, b'inserts'),
2385 (dosets, b'sets'),
2385 (dosets, b'sets'),
2386 (domixed, b'mixed')
2386 (domixed, b'mixed')
2387 ])
2387 ])
2388
2388
2389 for fn, title in benches:
2389 for fn, title in benches:
2390 timer, fm = gettimer(ui, opts)
2390 timer, fm = gettimer(ui, opts)
2391 timer(fn, title=title)
2391 timer(fn, title=title)
2392 fm.end()
2392 fm.end()
2393
2393
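# A hedged, miniature version of the three access patterns benchmarked above
# (sizes, keys and costs are arbitrary example values):
from mercurial import util as _util

def _tiny_lru_demo():
    d = _util.lrucachedict(4)              # size-bounded cache
    for v in range(8):
        d[v] = v                           # 'set' mode: inserts with eviction
    hits = 0
    for k in range(8):                     # 'get' mode: raw lookups
        try:
            d[k]
            hits += 1
        except KeyError:
            pass                           # evicted entries miss
    c = _util.lrucachedict(4, maxcost=10)  # cost-aware variant
    c.insert('a', 'a', cost=7)
    c.insert('b', 'b', cost=7)  # pushes total cost past maxcost; oldest entry is evicted
    return hits
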
2394 @command(b'perfwrite', formatteropts)
2394 @command(b'perfwrite', formatteropts)
2395 def perfwrite(ui, repo, **opts):
2395 def perfwrite(ui, repo, **opts):
2396 """microbenchmark ui.write
2396 """microbenchmark ui.write
2397 """
2397 """
2398 opts = _byteskwargs(opts)
2398 opts = _byteskwargs(opts)
2399
2399
2400 timer, fm = gettimer(ui, opts)
2400 timer, fm = gettimer(ui, opts)
2401 def write():
2401 def write():
2402 for i in range(100000):
2402 for i in range(100000):
2403 ui.write((b'Testing write performance\n'))
2403 ui.write((b'Testing write performance\n'))
2404 timer(write)
2404 timer(write)
2405 fm.end()
2405 fm.end()
2406
2406
2407 def uisetup(ui):
2407 def uisetup(ui):
2408 if (util.safehasattr(cmdutil, b'openrevlog') and
2408 if (util.safehasattr(cmdutil, b'openrevlog') and
2409 not util.safehasattr(commands, b'debugrevlogopts')):
2409 not util.safehasattr(commands, b'debugrevlogopts')):
2410 # for "historical portability":
2410 # for "historical portability":
2411 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2411 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2412 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2412 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2413 # openrevlog() should cause failure, because it has been
2413 # openrevlog() should cause failure, because it has been
2414 # available since 3.5 (or 49c583ca48c4).
2414 # available since 3.5 (or 49c583ca48c4).
2415 def openrevlog(orig, repo, cmd, file_, opts):
2415 def openrevlog(orig, repo, cmd, file_, opts):
2416 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2416 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2417 raise error.Abort(b"This version doesn't support --dir option",
2417 raise error.Abort(b"This version doesn't support --dir option",
2418 hint=b"use 3.5 or later")
2418 hint=b"use 3.5 or later")
2419 return orig(repo, cmd, file_, opts)
2419 return orig(repo, cmd, file_, opts)
2420 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2420 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)