perf: add a --[no-]clear-caches option to `perfnodemap`...
Boris Feld
r41611:d1a27307 default
@@ -1,2754 +1,2761 @@
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
except ImportError:
    pass


def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

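# Note (editorial, not part of the original perf.py): every perf command below
# begins with `opts = _byteskwargs(opts)`.  On Python 3 the **kwargs dict has
# str keys, and pycompat.byteskwargs converts them back to the bytes keys that
# the rest of this file expects; with the identity fallback above it becomes a
# no-op on installs that predate pycompat.
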
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator

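# Illustrative sketch (editorial, not part of the original perf.py): whichever
# branch above supplied @command, registering a benchmark has the same shape.
# The command name below is hypothetical, and the code is kept inside this
# comment so the real command table is left untouched:
#
#   @command(b'perfexample', formatteropts)
#   def perfexample(ui, repo, **opts):
#       opts = _byteskwargs(opts)
#       timer, fm = gettimer(ui, opts)
#       timer(lambda: len(repo.changelog.headrevs()))
#       fm.end()
#
# The decorator stores the function and its option list in `cmdtable`, which
# Mercurial reads when the extension is loaded.
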
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm

def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))

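# Illustrative sketch (editorial, not part of the original perf.py): timeone()
# yields a one-element list that is only filled in when the block exits.  The
# hypothetical helper below just demonstrates the shape of a single sample;
# nothing in this file calls it.
def _timeone_example(func):
    with timeone() as sample:
        func()
    # each sample is a (wall clock, user CPU, system CPU) tuple in seconds
    wallclock, usertime, systime = sample[0]
    return wallclock, usertime, systime
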
def _timer(fm, func, setup=None, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)

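# Note (editorial, not part of the original perf.py): _timer() keeps sampling
# func() until roughly 3 seconds and at least 100 runs have elapsed (or 10
# seconds and at least 3 runs), calling setup() outside the timed region on
# each iteration.  formatone() then emits one line per statistic of the form
#
#   ! wall <sec> comb <sec> user <sec> sys <sec> (best of <count>)
#
# plus "max."/"avg."/"median."-prefixed lines when perf.all-timing is enabled.
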
# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()

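# Illustrative sketch (editorial, not part of the original perf.py): the
# attrutil object returned above lets a caller swap an attribute for the
# duration of a measurement and then undo the change.  The hypothetical helper
# below is not used anywhere in this file.
def _redirected_fout_example(ui, func):
    fout = safeattrsetter(ui, b'fout', ignoremissing=True)
    if fout:
        fout.set(ui.ferr)   # redirect output, as gettimer() does above
    try:
        return func()
    finally:
        if fout:
            fout.restore()  # put the original stream back
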
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

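# Note (editorial, not part of the original perf.py): the benchmarks below pass
# cache-clearing helpers such as clearcaches() through the timer's `setup=`
# argument, and _timer() runs setup() outside the timed block, so every sample
# starts from cold caches without the clearing itself being measured.
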
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()

@command(b'perftags', formatteropts+
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()

@command(b'perfbookmarks', formatteropts +
         [
             (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()

@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()

@command(b'perfmanifest',[
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
        ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
981 b'node')
981 b'node')
982 def d():
982 def d():
983 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
983 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
984 repo.manifestlog[t].read()
984 repo.manifestlog[t].read()
985 timer(d)
985 timer(d)
986 fm.end()
986 fm.end()
987
987
988 @command(b'perfchangeset', formatteropts)
988 @command(b'perfchangeset', formatteropts)
989 def perfchangeset(ui, repo, rev, **opts):
989 def perfchangeset(ui, repo, rev, **opts):
990 opts = _byteskwargs(opts)
990 opts = _byteskwargs(opts)
991 timer, fm = gettimer(ui, opts)
991 timer, fm = gettimer(ui, opts)
992 n = scmutil.revsingle(repo, rev).node()
992 n = scmutil.revsingle(repo, rev).node()
993 def d():
993 def d():
994 repo.changelog.read(n)
994 repo.changelog.read(n)
995 #repo.changelog._cache = None
995 #repo.changelog._cache = None
996 timer(d)
996 timer(d)
997 fm.end()
997 fm.end()
998
998
999 @command(b'perfignore', formatteropts)
999 @command(b'perfignore', formatteropts)
1000 def perfignore(ui, repo, **opts):
1000 def perfignore(ui, repo, **opts):
1001 """benchmark operation related to computing ignore"""
1001 """benchmark operation related to computing ignore"""
1002 opts = _byteskwargs(opts)
1002 opts = _byteskwargs(opts)
1003 timer, fm = gettimer(ui, opts)
1003 timer, fm = gettimer(ui, opts)
1004 dirstate = repo.dirstate
1004 dirstate = repo.dirstate
1005
1005
1006 def setupone():
1006 def setupone():
1007 dirstate.invalidate()
1007 dirstate.invalidate()
1008 clearfilecache(dirstate, b'_ignore')
1008 clearfilecache(dirstate, b'_ignore')
1009
1009
1010 def runone():
1010 def runone():
1011 dirstate._ignore
1011 dirstate._ignore
1012
1012
1013 timer(runone, setup=setupone, title=b"load")
1013 timer(runone, setup=setupone, title=b"load")
1014 fm.end()
1014 fm.end()
1015
1015
1016 @command(b'perfindex', [
1016 @command(b'perfindex', [
1017 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1017 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1018 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1018 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1019 ] + formatteropts)
1019 ] + formatteropts)
1020 def perfindex(ui, repo, **opts):
1020 def perfindex(ui, repo, **opts):
1021 """benchmark index creation time followed by a lookup
1021 """benchmark index creation time followed by a lookup
1022
1022
1023 The default is to look `tip` up. Depending on the index implementation,
1023 The default is to look `tip` up. Depending on the index implementation,
1024 the revision looked up can matters. For example, an implementation
1024 the revision looked up can matters. For example, an implementation
1025 scanning the index will have a faster lookup time for `--rev tip` than for
1025 scanning the index will have a faster lookup time for `--rev tip` than for
1026 `--rev 0`. The number of looked up revisions and their order can also
1026 `--rev 0`. The number of looked up revisions and their order can also
1027 matters.
1027 matters.
1028
1028
1029 Example of useful set to test:
1029 Example of useful set to test:
1030 * tip
1030 * tip
1031 * 0
1031 * 0
1032 * -10:
1032 * -10:
1033 * :10
1033 * :10
1034 * -10: + :10
1034 * -10: + :10
1035 * :10: + -10:
1035 * :10: + -10:
1036 * -10000:
1036 * -10000:
1037 * -10000: + 0
1037 * -10000: + 0
1038
1038
1039 It is not currently possible to check for lookup of a missing node. For
1039 It is not currently possible to check for lookup of a missing node. For
1040 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1040 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1041 import mercurial.revlog
1041 import mercurial.revlog
1042 opts = _byteskwargs(opts)
1042 opts = _byteskwargs(opts)
1043 timer, fm = gettimer(ui, opts)
1043 timer, fm = gettimer(ui, opts)
1044 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1044 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1045 if opts[b'no_lookup']:
1045 if opts[b'no_lookup']:
1046 if opts['rev']:
1046 if opts['rev']:
1047 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1047 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1048 nodes = []
1048 nodes = []
1049 elif not opts[b'rev']:
1049 elif not opts[b'rev']:
1050 nodes = [repo[b"tip"].node()]
1050 nodes = [repo[b"tip"].node()]
1051 else:
1051 else:
1052 revs = scmutil.revrange(repo, opts[b'rev'])
1052 revs = scmutil.revrange(repo, opts[b'rev'])
1053 cl = repo.changelog
1053 cl = repo.changelog
1054 nodes = [cl.node(r) for r in revs]
1054 nodes = [cl.node(r) for r in revs]
1055
1055
1056 unfi = repo.unfiltered()
1056 unfi = repo.unfiltered()
1057 # find the filecache func directly
1057 # find the filecache func directly
1058 # This avoid polluting the benchmark with the filecache logic
1058 # This avoid polluting the benchmark with the filecache logic
1059 makecl = unfi.__class__.changelog.func
1059 makecl = unfi.__class__.changelog.func
1060 def setup():
1060 def setup():
1061 # probably not necessary, but for good measure
1061 # probably not necessary, but for good measure
1062 clearchangelog(unfi)
1062 clearchangelog(unfi)
1063 def d():
1063 def d():
1064 cl = makecl(unfi)
1064 cl = makecl(unfi)
1065 for n in nodes:
1065 for n in nodes:
1066 cl.rev(n)
1066 cl.rev(n)
1067 timer(d, setup=setup)
1067 timer(d, setup=setup)
1068 fm.end()
1068 fm.end()
1069
1069
1070 @command(b'perfnodemap', [
1070 @command(b'perfnodemap', [
1071 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1071 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1072 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1072 ] + formatteropts)
1073 ] + formatteropts)
1073 def perfnodemap(ui, repo, **opts):
1074 def perfnodemap(ui, repo, **opts):
1074 """benchmark the time necessary to look up revision from a cold nodemap
1075 """benchmark the time necessary to look up revision from a cold nodemap
1075
1076
1076 Depending on the implementation, the amount and order of revision we look
1077 Depending on the implementation, the amount and order of revision we look
1077 up can varies. Example of useful set to test:
1078 up can varies. Example of useful set to test:
1078 * tip
1079 * tip
1079 * 0
1080 * 0
1080 * -10:
1081 * -10:
1081 * :10
1082 * :10
1082 * -10: + :10
1083 * -10: + :10
1083 * :10: + -10:
1084 * :10: + -10:
1084 * -10000:
1085 * -10000:
1085 * -10000: + 0
1086 * -10000: + 0
1086
1087
1087 The command currently focus on valid binary lookup. Benchmarking for
1088 The command currently focus on valid binary lookup. Benchmarking for
1088 hexlookup, prefix lookup and missing lookup would also be valuable.
1089 hexlookup, prefix lookup and missing lookup would also be valuable.
1089 """
1090 """
1090 import mercurial.revlog
1091 import mercurial.revlog
1091 opts = _byteskwargs(opts)
1092 opts = _byteskwargs(opts)
1092 timer, fm = gettimer(ui, opts)
1093 timer, fm = gettimer(ui, opts)
1093 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1094 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1094
1095
1095 unfi = repo.unfiltered()
1096 unfi = repo.unfiltered()
1097 clearcaches = opts['clear_caches']
1096 # find the filecache func directly
1098 # find the filecache func directly
1097 # This avoid polluting the benchmark with the filecache logic
1099 # This avoid polluting the benchmark with the filecache logic
1098 makecl = unfi.__class__.changelog.func
1100 makecl = unfi.__class__.changelog.func
1099 if not opts[b'rev']:
1101 if not opts[b'rev']:
1100 raise error.Abort('use --rev to specify revisions to look up')
1102 raise error.Abort('use --rev to specify revisions to look up')
1101 revs = scmutil.revrange(repo, opts[b'rev'])
1103 revs = scmutil.revrange(repo, opts[b'rev'])
1102 cl = repo.changelog
1104 cl = repo.changelog
1103 nodes = [cl.node(r) for r in revs]
1105 nodes = [cl.node(r) for r in revs]
1104
1106
1105 # use a list to pass reference to a nodemap from one closure to the next
1107 # use a list to pass reference to a nodemap from one closure to the next
1106 nodeget = [None]
1108 nodeget = [None]
1107 def setnodeget():
1109 def setnodeget():
1108 # probably not necessary, but for good measure
1110 # probably not necessary, but for good measure
1109 clearchangelog(unfi)
1111 clearchangelog(unfi)
1110 nodeget[0] = makecl(unfi).nodemap.get
1112 nodeget[0] = makecl(unfi).nodemap.get
1111
1113
1112 def setup():
1113 setnodeget()
1114 def d():
1114 def d():
1115 get = nodeget[0]
1115 get = nodeget[0]
1116 for n in nodes:
1116 for n in nodes:
1117 get(n)
1117 get(n)
1118
1118
1119 setup = None
1120 if clearcaches:
1121 def setup():
1122 setnodeget()
1123 else:
1124 setnodeget()
1125 d() # prewarm the data structure
1119 timer(d, setup=setup)
1126 timer(d, setup=setup)
1120 fm.end()
1127 fm.end()
1121
1128
1122 @command(b'perfstartup', formatteropts)
1129 @command(b'perfstartup', formatteropts)
1123 def perfstartup(ui, repo, **opts):
1130 def perfstartup(ui, repo, **opts):
1124 opts = _byteskwargs(opts)
1131 opts = _byteskwargs(opts)
1125 timer, fm = gettimer(ui, opts)
1132 timer, fm = gettimer(ui, opts)
1126 def d():
1133 def d():
1127 if os.name != r'nt':
1134 if os.name != r'nt':
1128 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1135 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1129 fsencode(sys.argv[0]))
1136 fsencode(sys.argv[0]))
1130 else:
1137 else:
1131 os.environ[r'HGRCPATH'] = r' '
1138 os.environ[r'HGRCPATH'] = r' '
1132 os.system(r"%s version -q > NUL" % sys.argv[0])
1139 os.system(r"%s version -q > NUL" % sys.argv[0])
1133 timer(d)
1140 timer(d)
1134 fm.end()
1141 fm.end()
1135
1142
1136 @command(b'perfparents', formatteropts)
1143 @command(b'perfparents', formatteropts)
1137 def perfparents(ui, repo, **opts):
1144 def perfparents(ui, repo, **opts):
1138 opts = _byteskwargs(opts)
1145 opts = _byteskwargs(opts)
1139 timer, fm = gettimer(ui, opts)
1146 timer, fm = gettimer(ui, opts)
1140 # control the number of commits perfparents iterates over
1147 # control the number of commits perfparents iterates over
1141 # experimental config: perf.parentscount
1148 # experimental config: perf.parentscount
1142 count = getint(ui, b"perf", b"parentscount", 1000)
1149 count = getint(ui, b"perf", b"parentscount", 1000)
1143 if len(repo.changelog) < count:
1150 if len(repo.changelog) < count:
1144 raise error.Abort(b"repo needs %d commits for this test" % count)
1151 raise error.Abort(b"repo needs %d commits for this test" % count)
1145 repo = repo.unfiltered()
1152 repo = repo.unfiltered()
1146 nl = [repo.changelog.node(i) for i in _xrange(count)]
1153 nl = [repo.changelog.node(i) for i in _xrange(count)]
1147 def d():
1154 def d():
1148 for n in nl:
1155 for n in nl:
1149 repo.changelog.parents(n)
1156 repo.changelog.parents(n)
1150 timer(d)
1157 timer(d)
1151 fm.end()
1158 fm.end()
1152
1159
1153 @command(b'perfctxfiles', formatteropts)
1160 @command(b'perfctxfiles', formatteropts)
1154 def perfctxfiles(ui, repo, x, **opts):
1161 def perfctxfiles(ui, repo, x, **opts):
1155 opts = _byteskwargs(opts)
1162 opts = _byteskwargs(opts)
1156 x = int(x)
1163 x = int(x)
1157 timer, fm = gettimer(ui, opts)
1164 timer, fm = gettimer(ui, opts)
1158 def d():
1165 def d():
1159 len(repo[x].files())
1166 len(repo[x].files())
1160 timer(d)
1167 timer(d)
1161 fm.end()
1168 fm.end()
1162
1169
1163 @command(b'perfrawfiles', formatteropts)
1170 @command(b'perfrawfiles', formatteropts)
1164 def perfrawfiles(ui, repo, x, **opts):
1171 def perfrawfiles(ui, repo, x, **opts):
1165 opts = _byteskwargs(opts)
1172 opts = _byteskwargs(opts)
1166 x = int(x)
1173 x = int(x)
1167 timer, fm = gettimer(ui, opts)
1174 timer, fm = gettimer(ui, opts)
1168 cl = repo.changelog
1175 cl = repo.changelog
1169 def d():
1176 def d():
1170 len(cl.read(x)[3])
1177 len(cl.read(x)[3])
1171 timer(d)
1178 timer(d)
1172 fm.end()
1179 fm.end()
1173
1180
1174 @command(b'perflookup', formatteropts)
1181 @command(b'perflookup', formatteropts)
1175 def perflookup(ui, repo, rev, **opts):
1182 def perflookup(ui, repo, rev, **opts):
1176 opts = _byteskwargs(opts)
1183 opts = _byteskwargs(opts)
1177 timer, fm = gettimer(ui, opts)
1184 timer, fm = gettimer(ui, opts)
1178 timer(lambda: len(repo.lookup(rev)))
1185 timer(lambda: len(repo.lookup(rev)))
1179 fm.end()
1186 fm.end()
1180
1187
1181 @command(b'perflinelogedits',
1188 @command(b'perflinelogedits',
1182 [(b'n', b'edits', 10000, b'number of edits'),
1189 [(b'n', b'edits', 10000, b'number of edits'),
1183 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1190 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1184 ], norepo=True)
1191 ], norepo=True)
1185 def perflinelogedits(ui, **opts):
1192 def perflinelogedits(ui, **opts):
1186 from mercurial import linelog
1193 from mercurial import linelog
1187
1194
1188 opts = _byteskwargs(opts)
1195 opts = _byteskwargs(opts)
1189
1196
1190 edits = opts[b'edits']
1197 edits = opts[b'edits']
1191 maxhunklines = opts[b'max_hunk_lines']
1198 maxhunklines = opts[b'max_hunk_lines']
1192
1199
1193 maxb1 = 100000
1200 maxb1 = 100000
1194 random.seed(0)
1201 random.seed(0)
1195 randint = random.randint
1202 randint = random.randint
1196 currentlines = 0
1203 currentlines = 0
1197 arglist = []
1204 arglist = []
1198 for rev in _xrange(edits):
1205 for rev in _xrange(edits):
1199 a1 = randint(0, currentlines)
1206 a1 = randint(0, currentlines)
1200 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1207 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1201 b1 = randint(0, maxb1)
1208 b1 = randint(0, maxb1)
1202 b2 = randint(b1, b1 + maxhunklines)
1209 b2 = randint(b1, b1 + maxhunklines)
1203 currentlines += (b2 - b1) - (a2 - a1)
1210 currentlines += (b2 - b1) - (a2 - a1)
1204 arglist.append((rev, a1, a2, b1, b2))
1211 arglist.append((rev, a1, a2, b1, b2))
1205
1212
1206 def d():
1213 def d():
1207 ll = linelog.linelog()
1214 ll = linelog.linelog()
1208 for args in arglist:
1215 for args in arglist:
1209 ll.replacelines(*args)
1216 ll.replacelines(*args)
1210
1217
1211 timer, fm = gettimer(ui, opts)
1218 timer, fm = gettimer(ui, opts)
1212 timer(d)
1219 timer(d)
1213 fm.end()
1220 fm.end()
1214
1221
1215 @command(b'perfrevrange', formatteropts)
1222 @command(b'perfrevrange', formatteropts)
1216 def perfrevrange(ui, repo, *specs, **opts):
1223 def perfrevrange(ui, repo, *specs, **opts):
1217 opts = _byteskwargs(opts)
1224 opts = _byteskwargs(opts)
1218 timer, fm = gettimer(ui, opts)
1225 timer, fm = gettimer(ui, opts)
1219 revrange = scmutil.revrange
1226 revrange = scmutil.revrange
1220 timer(lambda: len(revrange(repo, specs)))
1227 timer(lambda: len(revrange(repo, specs)))
1221 fm.end()
1228 fm.end()
1222
1229
1223 @command(b'perfnodelookup', formatteropts)
1230 @command(b'perfnodelookup', formatteropts)
1224 def perfnodelookup(ui, repo, rev, **opts):
1231 def perfnodelookup(ui, repo, rev, **opts):
1225 opts = _byteskwargs(opts)
1232 opts = _byteskwargs(opts)
1226 timer, fm = gettimer(ui, opts)
1233 timer, fm = gettimer(ui, opts)
1227 import mercurial.revlog
1234 import mercurial.revlog
1228 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1235 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1229 n = scmutil.revsingle(repo, rev).node()
1236 n = scmutil.revsingle(repo, rev).node()
1230 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1237 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1231 def d():
1238 def d():
1232 cl.rev(n)
1239 cl.rev(n)
1233 clearcaches(cl)
1240 clearcaches(cl)
1234 timer(d)
1241 timer(d)
1235 fm.end()
1242 fm.end()
1236
1243
1237 @command(b'perflog',
1244 @command(b'perflog',
1238 [(b'', b'rename', False, b'ask log to follow renames')
1245 [(b'', b'rename', False, b'ask log to follow renames')
1239 ] + formatteropts)
1246 ] + formatteropts)
1240 def perflog(ui, repo, rev=None, **opts):
1247 def perflog(ui, repo, rev=None, **opts):
1241 opts = _byteskwargs(opts)
1248 opts = _byteskwargs(opts)
1242 if rev is None:
1249 if rev is None:
1243 rev=[]
1250 rev=[]
1244 timer, fm = gettimer(ui, opts)
1251 timer, fm = gettimer(ui, opts)
1245 ui.pushbuffer()
1252 ui.pushbuffer()
1246 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1253 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1247 copies=opts.get(b'rename')))
1254 copies=opts.get(b'rename')))
1248 ui.popbuffer()
1255 ui.popbuffer()
1249 fm.end()
1256 fm.end()
1250
1257
1251 @command(b'perfmoonwalk', formatteropts)
1258 @command(b'perfmoonwalk', formatteropts)
1252 def perfmoonwalk(ui, repo, **opts):
1259 def perfmoonwalk(ui, repo, **opts):
1253 """benchmark walking the changelog backwards
1260 """benchmark walking the changelog backwards
1254
1261
1255 This also loads the changelog data for each revision in the changelog.
1262 This also loads the changelog data for each revision in the changelog.
1256 """
1263 """
1257 opts = _byteskwargs(opts)
1264 opts = _byteskwargs(opts)
1258 timer, fm = gettimer(ui, opts)
1265 timer, fm = gettimer(ui, opts)
1259 def moonwalk():
1266 def moonwalk():
1260 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1267 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1261 ctx = repo[i]
1268 ctx = repo[i]
1262 ctx.branch() # read changelog data (in addition to the index)
1269 ctx.branch() # read changelog data (in addition to the index)
1263 timer(moonwalk)
1270 timer(moonwalk)
1264 fm.end()
1271 fm.end()
1265
1272
1266 @command(b'perftemplating',
1273 @command(b'perftemplating',
1267 [(b'r', b'rev', [], b'revisions to run the template on'),
1274 [(b'r', b'rev', [], b'revisions to run the template on'),
1268 ] + formatteropts)
1275 ] + formatteropts)
1269 def perftemplating(ui, repo, testedtemplate=None, **opts):
1276 def perftemplating(ui, repo, testedtemplate=None, **opts):
1270 """test the rendering time of a given template"""
1277 """test the rendering time of a given template"""
1271 if makelogtemplater is None:
1278 if makelogtemplater is None:
1272 raise error.Abort((b"perftemplating not available with this Mercurial"),
1279 raise error.Abort((b"perftemplating not available with this Mercurial"),
1273 hint=b"use 4.3 or later")
1280 hint=b"use 4.3 or later")
1274
1281
1275 opts = _byteskwargs(opts)
1282 opts = _byteskwargs(opts)
1276
1283
1277 nullui = ui.copy()
1284 nullui = ui.copy()
1278 nullui.fout = open(os.devnull, r'wb')
1285 nullui.fout = open(os.devnull, r'wb')
1279 nullui.disablepager()
1286 nullui.disablepager()
1280 revs = opts.get(b'rev')
1287 revs = opts.get(b'rev')
1281 if not revs:
1288 if not revs:
1282 revs = [b'all()']
1289 revs = [b'all()']
1283 revs = list(scmutil.revrange(repo, revs))
1290 revs = list(scmutil.revrange(repo, revs))
1284
1291
1285 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1292 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1286 b' {author|person}: {desc|firstline}\n')
1293 b' {author|person}: {desc|firstline}\n')
1287 if testedtemplate is None:
1294 if testedtemplate is None:
1288 testedtemplate = defaulttemplate
1295 testedtemplate = defaulttemplate
1289 displayer = makelogtemplater(nullui, repo, testedtemplate)
1296 displayer = makelogtemplater(nullui, repo, testedtemplate)
1290 def format():
1297 def format():
1291 for r in revs:
1298 for r in revs:
1292 ctx = repo[r]
1299 ctx = repo[r]
1293 displayer.show(ctx)
1300 displayer.show(ctx)
1294 displayer.flush(ctx)
1301 displayer.flush(ctx)
1295
1302
1296 timer, fm = gettimer(ui, opts)
1303 timer, fm = gettimer(ui, opts)
1297 timer(format)
1304 timer(format)
1298 fm.end()
1305 fm.end()
1299
1306
1300 @command(b'perfhelper-pathcopies', formatteropts +
1307 @command(b'perfhelper-pathcopies', formatteropts +
1301 [
1308 [
1302 (b'r', b'revs', [], b'restrict search to these revisions'),
1309 (b'r', b'revs', [], b'restrict search to these revisions'),
1303 (b'', b'timing', False, b'provides extra data (costly)'),
1310 (b'', b'timing', False, b'provides extra data (costly)'),
1304 ])
1311 ])
1305 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1312 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1306 """find statistic about potential parameters for the `perftracecopies`
1313 """find statistic about potential parameters for the `perftracecopies`
1307
1314
1308 This command find source-destination pair relevant for copytracing testing.
1315 This command find source-destination pair relevant for copytracing testing.
1309 It report value for some of the parameters that impact copy tracing time.
1316 It report value for some of the parameters that impact copy tracing time.
1310
1317
1311 If `--timing` is set, rename detection is run and the associated timing
1318 If `--timing` is set, rename detection is run and the associated timing
1312 will be reported. The extra details comes at the cost of a slower command
1319 will be reported. The extra details comes at the cost of a slower command
1313 execution.
1320 execution.
1314
1321
1315 Since the rename detection is only run once, other factors might easily
1322 Since the rename detection is only run once, other factors might easily
1316 affect the precision of the timing. However it should give a good
1323 affect the precision of the timing. However it should give a good
1317 approximation of which revision pairs are very costly.
1324 approximation of which revision pairs are very costly.
1318 """
1325 """
1319 opts = _byteskwargs(opts)
1326 opts = _byteskwargs(opts)
1320 fm = ui.formatter(b'perf', opts)
1327 fm = ui.formatter(b'perf', opts)
1321 dotiming = opts[b'timing']
1328 dotiming = opts[b'timing']
1322
1329
1323 if dotiming:
1330 if dotiming:
1324 header = '%12s %12s %12s %12s %12s %12s\n'
1331 header = '%12s %12s %12s %12s %12s %12s\n'
1325 output = ("%(source)12s %(destination)12s "
1332 output = ("%(source)12s %(destination)12s "
1326 "%(nbrevs)12d %(nbmissingfiles)12d "
1333 "%(nbrevs)12d %(nbmissingfiles)12d "
1327 "%(nbrenamedfiles)12d %(time)18.5f\n")
1334 "%(nbrenamedfiles)12d %(time)18.5f\n")
1328 header_names = ("source", "destination", "nb-revs", "nb-files",
1335 header_names = ("source", "destination", "nb-revs", "nb-files",
1329 "nb-renames", "time")
1336 "nb-renames", "time")
1330 fm.plain(header % header_names)
1337 fm.plain(header % header_names)
1331 else:
1338 else:
1332 header = '%12s %12s %12s %12s\n'
1339 header = '%12s %12s %12s %12s\n'
1333 output = ("%(source)12s %(destination)12s "
1340 output = ("%(source)12s %(destination)12s "
1334 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1341 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1335 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1342 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1336
1343
1337 if not revs:
1344 if not revs:
1338 revs = ['all()']
1345 revs = ['all()']
1339 revs = scmutil.revrange(repo, revs)
1346 revs = scmutil.revrange(repo, revs)
1340
1347
1341 roi = repo.revs('merge() and %ld', revs)
1348 roi = repo.revs('merge() and %ld', revs)
1342 for r in roi:
1349 for r in roi:
1343 ctx = repo[r]
1350 ctx = repo[r]
1344 p1 = ctx.p1().rev()
1351 p1 = ctx.p1().rev()
1345 p2 = ctx.p2().rev()
1352 p2 = ctx.p2().rev()
1346 bases = repo.changelog._commonancestorsheads(p1, p2)
1353 bases = repo.changelog._commonancestorsheads(p1, p2)
1347 for p in (p1, p2):
1354 for p in (p1, p2):
1348 for b in bases:
1355 for b in bases:
1349 base = repo[b]
1356 base = repo[b]
1350 parent = repo[p]
1357 parent = repo[p]
1351 missing = copies._computeforwardmissing(base, parent)
1358 missing = copies._computeforwardmissing(base, parent)
1352 if not missing:
1359 if not missing:
1353 continue
1360 continue
1354 data = {
1361 data = {
1355 b'source': base.hex(),
1362 b'source': base.hex(),
1356 b'destination': parent.hex(),
1363 b'destination': parent.hex(),
1357 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1364 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1358 b'nbmissingfiles': len(missing),
1365 b'nbmissingfiles': len(missing),
1359 }
1366 }
1360 if dotiming:
1367 if dotiming:
1361 begin = util.timer()
1368 begin = util.timer()
1362 renames = copies.pathcopies(base, parent)
1369 renames = copies.pathcopies(base, parent)
1363 end = util.timer()
1370 end = util.timer()
1364 # not very stable timing since we did only one run
1371 # not very stable timing since we did only one run
1365 data['time'] = end - begin
1372 data['time'] = end - begin
1366 data['nbrenamedfiles'] = len(renames)
1373 data['nbrenamedfiles'] = len(renames)
1367 fm.startitem()
1374 fm.startitem()
1368 fm.data(**data)
1375 fm.data(**data)
1369 out = data.copy()
1376 out = data.copy()
1370 out['source'] = fm.hexfunc(base.node())
1377 out['source'] = fm.hexfunc(base.node())
1371 out['destination'] = fm.hexfunc(parent.node())
1378 out['destination'] = fm.hexfunc(parent.node())
1372 fm.plain(output % out)
1379 fm.plain(output % out)
1373
1380
1374 fm.end()
1381 fm.end()
1375
1382
1376 @command(b'perfcca', formatteropts)
1383 @command(b'perfcca', formatteropts)
1377 def perfcca(ui, repo, **opts):
1384 def perfcca(ui, repo, **opts):
1378 opts = _byteskwargs(opts)
1385 opts = _byteskwargs(opts)
1379 timer, fm = gettimer(ui, opts)
1386 timer, fm = gettimer(ui, opts)
1380 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1387 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1381 fm.end()
1388 fm.end()
1382
1389
1383 @command(b'perffncacheload', formatteropts)
1390 @command(b'perffncacheload', formatteropts)
1384 def perffncacheload(ui, repo, **opts):
1391 def perffncacheload(ui, repo, **opts):
1385 opts = _byteskwargs(opts)
1392 opts = _byteskwargs(opts)
1386 timer, fm = gettimer(ui, opts)
1393 timer, fm = gettimer(ui, opts)
1387 s = repo.store
1394 s = repo.store
1388 def d():
1395 def d():
1389 s.fncache._load()
1396 s.fncache._load()
1390 timer(d)
1397 timer(d)
1391 fm.end()
1398 fm.end()
1392
1399
1393 @command(b'perffncachewrite', formatteropts)
1400 @command(b'perffncachewrite', formatteropts)
1394 def perffncachewrite(ui, repo, **opts):
1401 def perffncachewrite(ui, repo, **opts):
1395 opts = _byteskwargs(opts)
1402 opts = _byteskwargs(opts)
1396 timer, fm = gettimer(ui, opts)
1403 timer, fm = gettimer(ui, opts)
1397 s = repo.store
1404 s = repo.store
1398 lock = repo.lock()
1405 lock = repo.lock()
1399 s.fncache._load()
1406 s.fncache._load()
1400 tr = repo.transaction(b'perffncachewrite')
1407 tr = repo.transaction(b'perffncachewrite')
1401 tr.addbackup(b'fncache')
1408 tr.addbackup(b'fncache')
1402 def d():
1409 def d():
1403 s.fncache._dirty = True
1410 s.fncache._dirty = True
1404 s.fncache.write(tr)
1411 s.fncache.write(tr)
1405 timer(d)
1412 timer(d)
1406 tr.close()
1413 tr.close()
1407 lock.release()
1414 lock.release()
1408 fm.end()
1415 fm.end()
1409
1416
1410 @command(b'perffncacheencode', formatteropts)
1417 @command(b'perffncacheencode', formatteropts)
1411 def perffncacheencode(ui, repo, **opts):
1418 def perffncacheencode(ui, repo, **opts):
1412 opts = _byteskwargs(opts)
1419 opts = _byteskwargs(opts)
1413 timer, fm = gettimer(ui, opts)
1420 timer, fm = gettimer(ui, opts)
1414 s = repo.store
1421 s = repo.store
1415 s.fncache._load()
1422 s.fncache._load()
1416 def d():
1423 def d():
1417 for p in s.fncache.entries:
1424 for p in s.fncache.entries:
1418 s.encode(p)
1425 s.encode(p)
1419 timer(d)
1426 timer(d)
1420 fm.end()
1427 fm.end()
1421
1428
1422 def _bdiffworker(q, blocks, xdiff, ready, done):
1429 def _bdiffworker(q, blocks, xdiff, ready, done):
1423 while not done.is_set():
1430 while not done.is_set():
1424 pair = q.get()
1431 pair = q.get()
1425 while pair is not None:
1432 while pair is not None:
1426 if xdiff:
1433 if xdiff:
1427 mdiff.bdiff.xdiffblocks(*pair)
1434 mdiff.bdiff.xdiffblocks(*pair)
1428 elif blocks:
1435 elif blocks:
1429 mdiff.bdiff.blocks(*pair)
1436 mdiff.bdiff.blocks(*pair)
1430 else:
1437 else:
1431 mdiff.textdiff(*pair)
1438 mdiff.textdiff(*pair)
1432 q.task_done()
1439 q.task_done()
1433 pair = q.get()
1440 pair = q.get()
1434 q.task_done() # for the None one
1441 q.task_done() # for the None one
1435 with ready:
1442 with ready:
1436 ready.wait()
1443 ready.wait()
1437
1444
1438 def _manifestrevision(repo, mnode):
1445 def _manifestrevision(repo, mnode):
1439 ml = repo.manifestlog
1446 ml = repo.manifestlog
1440
1447
1441 if util.safehasattr(ml, b'getstorage'):
1448 if util.safehasattr(ml, b'getstorage'):
1442 store = ml.getstorage(b'')
1449 store = ml.getstorage(b'')
1443 else:
1450 else:
1444 store = ml._revlog
1451 store = ml._revlog
1445
1452
1446 return store.revision(mnode)
1453 return store.revision(mnode)
1447
1454
1448 @command(b'perfbdiff', revlogopts + formatteropts + [
1455 @command(b'perfbdiff', revlogopts + formatteropts + [
1449 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1456 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1450 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1457 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1451 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1458 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
1452 (b'', b'blocks', False, b'test computing diffs into blocks'),
1459 (b'', b'blocks', False, b'test computing diffs into blocks'),
1453 (b'', b'xdiff', False, b'use xdiff algorithm'),
1460 (b'', b'xdiff', False, b'use xdiff algorithm'),
1454 ],
1461 ],
1455
1462
1456 b'-c|-m|FILE REV')
1463 b'-c|-m|FILE REV')
1457 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1464 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1458 """benchmark a bdiff between revisions
1465 """benchmark a bdiff between revisions
1459
1466
1460 By default, benchmark a bdiff between its delta parent and itself.
1467 By default, benchmark a bdiff between its delta parent and itself.
1461
1468
1462 With ``--count``, benchmark bdiffs between delta parents and self for N
1469 With ``--count``, benchmark bdiffs between delta parents and self for N
1463 revisions starting at the specified revision.
1470 revisions starting at the specified revision.
1464
1471
1465 With ``--alldata``, assume the requested revision is a changeset and
1472 With ``--alldata``, assume the requested revision is a changeset and
1466 measure bdiffs for all changes related to that changeset (manifest
1473 measure bdiffs for all changes related to that changeset (manifest
1467 and filelogs).
1474 and filelogs).
1468 """
1475 """
1469 opts = _byteskwargs(opts)
1476 opts = _byteskwargs(opts)
1470
1477
1471 if opts[b'xdiff'] and not opts[b'blocks']:
1478 if opts[b'xdiff'] and not opts[b'blocks']:
1472 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1479 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1473
1480
1474 if opts[b'alldata']:
1481 if opts[b'alldata']:
1475 opts[b'changelog'] = True
1482 opts[b'changelog'] = True
1476
1483
1477 if opts.get(b'changelog') or opts.get(b'manifest'):
1484 if opts.get(b'changelog') or opts.get(b'manifest'):
1478 file_, rev = None, file_
1485 file_, rev = None, file_
1479 elif rev is None:
1486 elif rev is None:
1480 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1487 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1481
1488
1482 blocks = opts[b'blocks']
1489 blocks = opts[b'blocks']
1483 xdiff = opts[b'xdiff']
1490 xdiff = opts[b'xdiff']
1484 textpairs = []
1491 textpairs = []
1485
1492
1486 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1493 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1487
1494
1488 startrev = r.rev(r.lookup(rev))
1495 startrev = r.rev(r.lookup(rev))
1489 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1496 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1490 if opts[b'alldata']:
1497 if opts[b'alldata']:
1491 # Load revisions associated with changeset.
1498 # Load revisions associated with changeset.
1492 ctx = repo[rev]
1499 ctx = repo[rev]
1493 mtext = _manifestrevision(repo, ctx.manifestnode())
1500 mtext = _manifestrevision(repo, ctx.manifestnode())
1494 for pctx in ctx.parents():
1501 for pctx in ctx.parents():
1495 pman = _manifestrevision(repo, pctx.manifestnode())
1502 pman = _manifestrevision(repo, pctx.manifestnode())
1496 textpairs.append((pman, mtext))
1503 textpairs.append((pman, mtext))
1497
1504
1498 # Load filelog revisions by iterating manifest delta.
1505 # Load filelog revisions by iterating manifest delta.
1499 man = ctx.manifest()
1506 man = ctx.manifest()
1500 pman = ctx.p1().manifest()
1507 pman = ctx.p1().manifest()
1501 for filename, change in pman.diff(man).items():
1508 for filename, change in pman.diff(man).items():
1502 fctx = repo.file(filename)
1509 fctx = repo.file(filename)
1503 f1 = fctx.revision(change[0][0] or -1)
1510 f1 = fctx.revision(change[0][0] or -1)
1504 f2 = fctx.revision(change[1][0] or -1)
1511 f2 = fctx.revision(change[1][0] or -1)
1505 textpairs.append((f1, f2))
1512 textpairs.append((f1, f2))
1506 else:
1513 else:
1507 dp = r.deltaparent(rev)
1514 dp = r.deltaparent(rev)
1508 textpairs.append((r.revision(dp), r.revision(rev)))
1515 textpairs.append((r.revision(dp), r.revision(rev)))
1509
1516
1510 withthreads = threads > 0
1517 withthreads = threads > 0
1511 if not withthreads:
1518 if not withthreads:
1512 def d():
1519 def d():
1513 for pair in textpairs:
1520 for pair in textpairs:
1514 if xdiff:
1521 if xdiff:
1515 mdiff.bdiff.xdiffblocks(*pair)
1522 mdiff.bdiff.xdiffblocks(*pair)
1516 elif blocks:
1523 elif blocks:
1517 mdiff.bdiff.blocks(*pair)
1524 mdiff.bdiff.blocks(*pair)
1518 else:
1525 else:
1519 mdiff.textdiff(*pair)
1526 mdiff.textdiff(*pair)
1520 else:
1527 else:
1521 q = queue()
1528 q = queue()
1522 for i in _xrange(threads):
1529 for i in _xrange(threads):
1523 q.put(None)
1530 q.put(None)
1524 ready = threading.Condition()
1531 ready = threading.Condition()
1525 done = threading.Event()
1532 done = threading.Event()
1526 for i in _xrange(threads):
1533 for i in _xrange(threads):
1527 threading.Thread(target=_bdiffworker,
1534 threading.Thread(target=_bdiffworker,
1528 args=(q, blocks, xdiff, ready, done)).start()
1535 args=(q, blocks, xdiff, ready, done)).start()
1529 q.join()
1536 q.join()
1530 def d():
1537 def d():
1531 for pair in textpairs:
1538 for pair in textpairs:
1532 q.put(pair)
1539 q.put(pair)
1533 for i in _xrange(threads):
1540 for i in _xrange(threads):
1534 q.put(None)
1541 q.put(None)
1535 with ready:
1542 with ready:
1536 ready.notify_all()
1543 ready.notify_all()
1537 q.join()
1544 q.join()
1538 timer, fm = gettimer(ui, opts)
1545 timer, fm = gettimer(ui, opts)
1539 timer(d)
1546 timer(d)
1540 fm.end()
1547 fm.end()
1541
1548
1542 if withthreads:
1549 if withthreads:
1543 done.set()
1550 done.set()
1544 for i in _xrange(threads):
1551 for i in _xrange(threads):
1545 q.put(None)
1552 q.put(None)
1546 with ready:
1553 with ready:
1547 ready.notify_all()
1554 ready.notify_all()
1548
1555
1549 @command(b'perfunidiff', revlogopts + formatteropts + [
1556 @command(b'perfunidiff', revlogopts + formatteropts + [
1550 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1557 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1551 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1558 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1552 ], b'-c|-m|FILE REV')
1559 ], b'-c|-m|FILE REV')
1553 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1560 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1554 """benchmark a unified diff between revisions
1561 """benchmark a unified diff between revisions
1555
1562
1556 This doesn't include any copy tracing - it's just a unified diff
1563 This doesn't include any copy tracing - it's just a unified diff
1557 of the texts.
1564 of the texts.
1558
1565
1559 By default, benchmark a diff between its delta parent and itself.
1566 By default, benchmark a diff between its delta parent and itself.
1560
1567
1561 With ``--count``, benchmark diffs between delta parents and self for N
1568 With ``--count``, benchmark diffs between delta parents and self for N
1562 revisions starting at the specified revision.
1569 revisions starting at the specified revision.
1563
1570
1564 With ``--alldata``, assume the requested revision is a changeset and
1571 With ``--alldata``, assume the requested revision is a changeset and
1565 measure diffs for all changes related to that changeset (manifest
1572 measure diffs for all changes related to that changeset (manifest
1566 and filelogs).
1573 and filelogs).
1567 """
1574 """
1568 opts = _byteskwargs(opts)
1575 opts = _byteskwargs(opts)
1569 if opts[b'alldata']:
1576 if opts[b'alldata']:
1570 opts[b'changelog'] = True
1577 opts[b'changelog'] = True
1571
1578
1572 if opts.get(b'changelog') or opts.get(b'manifest'):
1579 if opts.get(b'changelog') or opts.get(b'manifest'):
1573 file_, rev = None, file_
1580 file_, rev = None, file_
1574 elif rev is None:
1581 elif rev is None:
1575 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1582 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1576
1583
1577 textpairs = []
1584 textpairs = []
1578
1585
1579 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1586 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1580
1587
1581 startrev = r.rev(r.lookup(rev))
1588 startrev = r.rev(r.lookup(rev))
1582 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1589 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1583 if opts[b'alldata']:
1590 if opts[b'alldata']:
1584 # Load revisions associated with changeset.
1591 # Load revisions associated with changeset.
1585 ctx = repo[rev]
1592 ctx = repo[rev]
1586 mtext = _manifestrevision(repo, ctx.manifestnode())
1593 mtext = _manifestrevision(repo, ctx.manifestnode())
1587 for pctx in ctx.parents():
1594 for pctx in ctx.parents():
1588 pman = _manifestrevision(repo, pctx.manifestnode())
1595 pman = _manifestrevision(repo, pctx.manifestnode())
1589 textpairs.append((pman, mtext))
1596 textpairs.append((pman, mtext))
1590
1597
1591 # Load filelog revisions by iterating manifest delta.
1598 # Load filelog revisions by iterating manifest delta.
1592 man = ctx.manifest()
1599 man = ctx.manifest()
1593 pman = ctx.p1().manifest()
1600 pman = ctx.p1().manifest()
1594 for filename, change in pman.diff(man).items():
1601 for filename, change in pman.diff(man).items():
1595 fctx = repo.file(filename)
1602 fctx = repo.file(filename)
1596 f1 = fctx.revision(change[0][0] or -1)
1603 f1 = fctx.revision(change[0][0] or -1)
1597 f2 = fctx.revision(change[1][0] or -1)
1604 f2 = fctx.revision(change[1][0] or -1)
1598 textpairs.append((f1, f2))
1605 textpairs.append((f1, f2))
1599 else:
1606 else:
1600 dp = r.deltaparent(rev)
1607 dp = r.deltaparent(rev)
1601 textpairs.append((r.revision(dp), r.revision(rev)))
1608 textpairs.append((r.revision(dp), r.revision(rev)))
1602
1609
1603 def d():
1610 def d():
1604 for left, right in textpairs:
1611 for left, right in textpairs:
1605 # The date strings don't matter, so we pass empty strings.
1612 # The date strings don't matter, so we pass empty strings.
1606 headerlines, hunks = mdiff.unidiff(
1613 headerlines, hunks = mdiff.unidiff(
1607 left, b'', right, b'', b'left', b'right', binary=False)
1614 left, b'', right, b'', b'left', b'right', binary=False)
1608 # consume iterators in roughly the way patch.py does
1615 # consume iterators in roughly the way patch.py does
1609 b'\n'.join(headerlines)
1616 b'\n'.join(headerlines)
1610 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1617 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1611 timer, fm = gettimer(ui, opts)
1618 timer, fm = gettimer(ui, opts)
1612 timer(d)
1619 timer(d)
1613 fm.end()
1620 fm.end()
1614
1621
1615 @command(b'perfdiffwd', formatteropts)
1622 @command(b'perfdiffwd', formatteropts)
1616 def perfdiffwd(ui, repo, **opts):
1623 def perfdiffwd(ui, repo, **opts):
1617 """Profile diff of working directory changes"""
1624 """Profile diff of working directory changes"""
1618 opts = _byteskwargs(opts)
1625 opts = _byteskwargs(opts)
1619 timer, fm = gettimer(ui, opts)
1626 timer, fm = gettimer(ui, opts)
1620 options = {
1627 options = {
1621 'w': 'ignore_all_space',
1628 'w': 'ignore_all_space',
1622 'b': 'ignore_space_change',
1629 'b': 'ignore_space_change',
1623 'B': 'ignore_blank_lines',
1630 'B': 'ignore_blank_lines',
1624 }
1631 }
1625
1632
1626 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1633 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1627 opts = dict((options[c], b'1') for c in diffopt)
1634 opts = dict((options[c], b'1') for c in diffopt)
1628 def d():
1635 def d():
1629 ui.pushbuffer()
1636 ui.pushbuffer()
1630 commands.diff(ui, repo, **opts)
1637 commands.diff(ui, repo, **opts)
1631 ui.popbuffer()
1638 ui.popbuffer()
1632 diffopt = diffopt.encode('ascii')
1639 diffopt = diffopt.encode('ascii')
1633 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1640 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1634 timer(d, title=title)
1641 timer(d, title=title)
1635 fm.end()
1642 fm.end()
1636
1643
1637 @command(b'perfrevlogindex', revlogopts + formatteropts,
1644 @command(b'perfrevlogindex', revlogopts + formatteropts,
1638 b'-c|-m|FILE')
1645 b'-c|-m|FILE')
1639 def perfrevlogindex(ui, repo, file_=None, **opts):
1646 def perfrevlogindex(ui, repo, file_=None, **opts):
1640 """Benchmark operations against a revlog index.
1647 """Benchmark operations against a revlog index.
1641
1648
1642 This tests constructing a revlog instance, reading index data,
1649 This tests constructing a revlog instance, reading index data,
1643 parsing index data, and performing various operations related to
1650 parsing index data, and performing various operations related to
1644 index data.
1651 index data.
1645 """
1652 """
1646
1653
1647 opts = _byteskwargs(opts)
1654 opts = _byteskwargs(opts)
1648
1655
1649 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1656 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1650
1657
1651 opener = getattr(rl, 'opener') # trick linter
1658 opener = getattr(rl, 'opener') # trick linter
1652 indexfile = rl.indexfile
1659 indexfile = rl.indexfile
1653 data = opener.read(indexfile)
1660 data = opener.read(indexfile)
1654
1661
1655 header = struct.unpack(b'>I', data[0:4])[0]
1662 header = struct.unpack(b'>I', data[0:4])[0]
1656 version = header & 0xFFFF
1663 version = header & 0xFFFF
1657 if version == 1:
1664 if version == 1:
1658 revlogio = revlog.revlogio()
1665 revlogio = revlog.revlogio()
1659 inline = header & (1 << 16)
1666 inline = header & (1 << 16)
1660 else:
1667 else:
1661 raise error.Abort((b'unsupported revlog version: %d') % version)
1668 raise error.Abort((b'unsupported revlog version: %d') % version)
1662
1669
1663 rllen = len(rl)
1670 rllen = len(rl)
1664
1671
1665 node0 = rl.node(0)
1672 node0 = rl.node(0)
1666 node25 = rl.node(rllen // 4)
1673 node25 = rl.node(rllen // 4)
1667 node50 = rl.node(rllen // 2)
1674 node50 = rl.node(rllen // 2)
1668 node75 = rl.node(rllen // 4 * 3)
1675 node75 = rl.node(rllen // 4 * 3)
1669 node100 = rl.node(rllen - 1)
1676 node100 = rl.node(rllen - 1)
1670
1677
1671 allrevs = range(rllen)
1678 allrevs = range(rllen)
1672 allrevsrev = list(reversed(allrevs))
1679 allrevsrev = list(reversed(allrevs))
1673 allnodes = [rl.node(rev) for rev in range(rllen)]
1680 allnodes = [rl.node(rev) for rev in range(rllen)]
1674 allnodesrev = list(reversed(allnodes))
1681 allnodesrev = list(reversed(allnodes))
1675
1682
1676 def constructor():
1683 def constructor():
1677 revlog.revlog(opener, indexfile)
1684 revlog.revlog(opener, indexfile)
1678
1685
1679 def read():
1686 def read():
1680 with opener(indexfile) as fh:
1687 with opener(indexfile) as fh:
1681 fh.read()
1688 fh.read()
1682
1689
1683 def parseindex():
1690 def parseindex():
1684 revlogio.parseindex(data, inline)
1691 revlogio.parseindex(data, inline)
1685
1692
1686 def getentry(revornode):
1693 def getentry(revornode):
1687 index = revlogio.parseindex(data, inline)[0]
1694 index = revlogio.parseindex(data, inline)[0]
1688 index[revornode]
1695 index[revornode]
1689
1696
1690 def getentries(revs, count=1):
1697 def getentries(revs, count=1):
1691 index = revlogio.parseindex(data, inline)[0]
1698 index = revlogio.parseindex(data, inline)[0]
1692
1699
1693 for i in range(count):
1700 for i in range(count):
1694 for rev in revs:
1701 for rev in revs:
1695 index[rev]
1702 index[rev]
1696
1703
1697 def resolvenode(node):
1704 def resolvenode(node):
1698 nodemap = revlogio.parseindex(data, inline)[1]
1705 nodemap = revlogio.parseindex(data, inline)[1]
1699 # This only works for the C code.
1706 # This only works for the C code.
1700 if nodemap is None:
1707 if nodemap is None:
1701 return
1708 return
1702
1709
1703 try:
1710 try:
1704 nodemap[node]
1711 nodemap[node]
1705 except error.RevlogError:
1712 except error.RevlogError:
1706 pass
1713 pass
1707
1714
1708 def resolvenodes(nodes, count=1):
1715 def resolvenodes(nodes, count=1):
1709 nodemap = revlogio.parseindex(data, inline)[1]
1716 nodemap = revlogio.parseindex(data, inline)[1]
1710 if nodemap is None:
1717 if nodemap is None:
1711 return
1718 return
1712
1719
1713 for i in range(count):
1720 for i in range(count):
1714 for node in nodes:
1721 for node in nodes:
1715 try:
1722 try:
1716 nodemap[node]
1723 nodemap[node]
1717 except error.RevlogError:
1724 except error.RevlogError:
1718 pass
1725 pass
1719
1726
1720 benches = [
1727 benches = [
1721 (constructor, b'revlog constructor'),
1728 (constructor, b'revlog constructor'),
1722 (read, b'read'),
1729 (read, b'read'),
1723 (parseindex, b'create index object'),
1730 (parseindex, b'create index object'),
1724 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1731 (lambda: getentry(0), b'retrieve index entry for rev 0'),
1725 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1732 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
1726 (lambda: resolvenode(node0), b'look up node at rev 0'),
1733 (lambda: resolvenode(node0), b'look up node at rev 0'),
1727 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1734 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
1728 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1735 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
1729 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1736 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
1730 (lambda: resolvenode(node100), b'look up node at tip'),
1737 (lambda: resolvenode(node100), b'look up node at tip'),
1731 # 2x variation is to measure caching impact.
1738 # 2x variation is to measure caching impact.
1732 (lambda: resolvenodes(allnodes),
1739 (lambda: resolvenodes(allnodes),
1733 b'look up all nodes (forward)'),
1740 b'look up all nodes (forward)'),
1734 (lambda: resolvenodes(allnodes, 2),
1741 (lambda: resolvenodes(allnodes, 2),
1735 b'look up all nodes 2x (forward)'),
1742 b'look up all nodes 2x (forward)'),
1736 (lambda: resolvenodes(allnodesrev),
1743 (lambda: resolvenodes(allnodesrev),
1737 b'look up all nodes (reverse)'),
1744 b'look up all nodes (reverse)'),
1738 (lambda: resolvenodes(allnodesrev, 2),
1745 (lambda: resolvenodes(allnodesrev, 2),
1739 b'look up all nodes 2x (reverse)'),
1746 b'look up all nodes 2x (reverse)'),
1740 (lambda: getentries(allrevs),
1747 (lambda: getentries(allrevs),
1741 b'retrieve all index entries (forward)'),
1748 b'retrieve all index entries (forward)'),
1742 (lambda: getentries(allrevs, 2),
1749 (lambda: getentries(allrevs, 2),
1743 b'retrieve all index entries 2x (forward)'),
1750 b'retrieve all index entries 2x (forward)'),
1744 (lambda: getentries(allrevsrev),
1751 (lambda: getentries(allrevsrev),
1745 b'retrieve all index entries (reverse)'),
1752 b'retrieve all index entries (reverse)'),
1746 (lambda: getentries(allrevsrev, 2),
1753 (lambda: getentries(allrevsrev, 2),
1747 b'retrieve all index entries 2x (reverse)'),
1754 b'retrieve all index entries 2x (reverse)'),
1748 ]
1755 ]
1749
1756
1750 for fn, title in benches:
1757 for fn, title in benches:
1751 timer, fm = gettimer(ui, opts)
1758 timer, fm = gettimer(ui, opts)
1752 timer(fn, title=title)
1759 timer(fn, title=title)
1753 fm.end()
1760 fm.end()
1754
1761
1755 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1762 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
1756 [(b'd', b'dist', 100, b'distance between the revisions'),
1763 [(b'd', b'dist', 100, b'distance between the revisions'),
1757 (b's', b'startrev', 0, b'revision to start reading at'),
1764 (b's', b'startrev', 0, b'revision to start reading at'),
1758 (b'', b'reverse', False, b'read in reverse')],
1765 (b'', b'reverse', False, b'read in reverse')],
1759 b'-c|-m|FILE')
1766 b'-c|-m|FILE')
1760 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1767 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1761 **opts):
1768 **opts):
1762 """Benchmark reading a series of revisions from a revlog.
1769 """Benchmark reading a series of revisions from a revlog.
1763
1770
1764 By default, we read every ``-d/--dist`` revision from 0 to tip of
1771 By default, we read every ``-d/--dist`` revision from 0 to tip of
1765 the specified revlog.
1772 the specified revlog.
1766
1773
1767 The start revision can be defined via ``-s/--startrev``.
1774 The start revision can be defined via ``-s/--startrev``.
1768 """
1775 """
1769 opts = _byteskwargs(opts)
1776 opts = _byteskwargs(opts)
1770
1777
1771 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1778 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
1772 rllen = getlen(ui)(rl)
1779 rllen = getlen(ui)(rl)
1773
1780
1774 if startrev < 0:
1781 if startrev < 0:
1775 startrev = rllen + startrev
1782 startrev = rllen + startrev
1776
1783
1777 def d():
1784 def d():
1778 rl.clearcaches()
1785 rl.clearcaches()
1779
1786
1780 beginrev = startrev
1787 beginrev = startrev
1781 endrev = rllen
1788 endrev = rllen
1782 dist = opts[b'dist']
1789 dist = opts[b'dist']
1783
1790
1784 if reverse:
1791 if reverse:
1785 beginrev, endrev = endrev - 1, beginrev - 1
1792 beginrev, endrev = endrev - 1, beginrev - 1
1786 dist = -1 * dist
1793 dist = -1 * dist
1787
1794
1788 for x in _xrange(beginrev, endrev, dist):
1795 for x in _xrange(beginrev, endrev, dist):
1789 # Old revisions don't support passing int.
1796 # Old revisions don't support passing int.
1790 n = rl.node(x)
1797 n = rl.node(x)
1791 rl.revision(n)
1798 rl.revision(n)
1792
1799
1793 timer, fm = gettimer(ui, opts)
1800 timer, fm = gettimer(ui, opts)
1794 timer(d)
1801 timer(d)
1795 fm.end()
1802 fm.end()
1796
1803
1797 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1804 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1798 [(b's', b'startrev', 1000, b'revision to start writing at'),
1805 [(b's', b'startrev', 1000, b'revision to start writing at'),
1799 (b'', b'stoprev', -1, b'last revision to write'),
1806 (b'', b'stoprev', -1, b'last revision to write'),
1800 (b'', b'count', 3, b'last revision to write'),
1807 (b'', b'count', 3, b'last revision to write'),
1801 (b'', b'details', False, b'print timing for every revisions tested'),
1808 (b'', b'details', False, b'print timing for every revisions tested'),
1802 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1809 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1803 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1810 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1804 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1811 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1805 ],
1812 ],
1806 b'-c|-m|FILE')
1813 b'-c|-m|FILE')
1807 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1814 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1808 """Benchmark writing a series of revisions to a revlog.
1815 """Benchmark writing a series of revisions to a revlog.
1809
1816
1810 Possible source values are:
1817 Possible source values are:
1811 * `full`: add from a full text (default).
1818 * `full`: add from a full text (default).
1812 * `parent-1`: add from a delta to the first parent
1819 * `parent-1`: add from a delta to the first parent
1813 * `parent-2`: add from a delta to the second parent if it exists
1820 * `parent-2`: add from a delta to the second parent if it exists
1814 (use a delta from the first parent otherwise)
1821 (use a delta from the first parent otherwise)
1815 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1822 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1816 * `storage`: add from the existing precomputed deltas
1823 * `storage`: add from the existing precomputed deltas
1817 """
1824 """
1818 opts = _byteskwargs(opts)
1825 opts = _byteskwargs(opts)
1819
1826
1820 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1827 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1821 rllen = getlen(ui)(rl)
1828 rllen = getlen(ui)(rl)
1822 if startrev < 0:
1829 if startrev < 0:
1823 startrev = rllen + startrev
1830 startrev = rllen + startrev
1824 if stoprev < 0:
1831 if stoprev < 0:
1825 stoprev = rllen + stoprev
1832 stoprev = rllen + stoprev
1826
1833
1827 lazydeltabase = opts['lazydeltabase']
1834 lazydeltabase = opts['lazydeltabase']
1828 source = opts['source']
1835 source = opts['source']
1829 clearcaches = opts['clear_caches']
1836 clearcaches = opts['clear_caches']
1830 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1837 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1831 b'storage')
1838 b'storage')
1832 if source not in validsource:
1839 if source not in validsource:
1833 raise error.Abort('invalid source type: %s' % source)
1840 raise error.Abort('invalid source type: %s' % source)
1834
1841
1835 ### actually gather results
1842 ### actually gather results
1836 count = opts['count']
1843 count = opts['count']
1837 if count <= 0:
1844 if count <= 0:
1838 raise error.Abort('invalid run count: %d' % count)
1845 raise error.Abort('invalid run count: %d' % count)
1839 allresults = []
1846 allresults = []
1840 for c in range(count):
1847 for c in range(count):
1841 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1848 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1842 lazydeltabase=lazydeltabase,
1849 lazydeltabase=lazydeltabase,
1843 clearcaches=clearcaches)
1850 clearcaches=clearcaches)
1844 allresults.append(timing)
1851 allresults.append(timing)
1845
1852
1846 ### consolidate the results in a single list
1853 ### consolidate the results in a single list
1847 results = []
1854 results = []
1848 for idx, (rev, t) in enumerate(allresults[0]):
1855 for idx, (rev, t) in enumerate(allresults[0]):
1849 ts = [t]
1856 ts = [t]
1850 for other in allresults[1:]:
1857 for other in allresults[1:]:
1851 orev, ot = other[idx]
1858 orev, ot = other[idx]
1852 assert orev == rev
1859 assert orev == rev
1853 ts.append(ot)
1860 ts.append(ot)
1854 results.append((rev, ts))
1861 results.append((rev, ts))
1855 resultcount = len(results)
1862 resultcount = len(results)
1856
1863
1857 ### Compute and display relevant statistics
1864 ### Compute and display relevant statistics
1858
1865
1859 # get a formatter
1866 # get a formatter
1860 fm = ui.formatter(b'perf', opts)
1867 fm = ui.formatter(b'perf', opts)
1861 displayall = ui.configbool(b"perf", b"all-timing", False)
1868 displayall = ui.configbool(b"perf", b"all-timing", False)
1862
1869
1863 # print individual details if requested
1870 # print individual details if requested
1864 if opts['details']:
1871 if opts['details']:
1865 for idx, item in enumerate(results, 1):
1872 for idx, item in enumerate(results, 1):
1866 rev, data = item
1873 rev, data = item
1867 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1874 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
1868 formatone(fm, data, title=title, displayall=displayall)
1875 formatone(fm, data, title=title, displayall=displayall)
1869
1876
1870 # sorts results by median time
1877 # sorts results by median time
1871 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1878 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1872 # list of (name, index) to display
1879 # list of (name, index) to display
1873 relevants = [
1880 relevants = [
1874 ("min", 0),
1881 ("min", 0),
1875 ("10%", resultcount * 10 // 100),
1882 ("10%", resultcount * 10 // 100),
1876 ("25%", resultcount * 25 // 100),
1883 ("25%", resultcount * 25 // 100),
1877 ("50%", resultcount * 70 // 100),
1884 ("50%", resultcount * 70 // 100),
1878 ("75%", resultcount * 75 // 100),
1885 ("75%", resultcount * 75 // 100),
1879 ("90%", resultcount * 90 // 100),
1886 ("90%", resultcount * 90 // 100),
1880 ("95%", resultcount * 95 // 100),
1887 ("95%", resultcount * 95 // 100),
1881 ("99%", resultcount * 99 // 100),
1888 ("99%", resultcount * 99 // 100),
1882 ("99.9%", resultcount * 999 // 1000),
1889 ("99.9%", resultcount * 999 // 1000),
1883 ("99.99%", resultcount * 9999 // 10000),
1890 ("99.99%", resultcount * 9999 // 10000),
1884 ("99.999%", resultcount * 99999 // 100000),
1891 ("99.999%", resultcount * 99999 // 100000),
1885 ("max", -1),
1892 ("max", -1),
1886 ]
1893 ]
1887 if not ui.quiet:
1894 if not ui.quiet:
1888 for name, idx in relevants:
1895 for name, idx in relevants:
1889 data = results[idx]
1896 data = results[idx]
1890 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1897 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1891 formatone(fm, data[1], title=title, displayall=displayall)
1898 formatone(fm, data[1], title=title, displayall=displayall)
1892
1899
1893 # XXX summing that many float will not be very precise, we ignore this fact
1900 # XXX summing that many float will not be very precise, we ignore this fact
1894 # for now
1901 # for now
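# A more careful alternative (not what this script does) would be math.fsum,
# which tracks partial sums and avoids most of that accumulation error, e.g.
# for each `item` in `allresults` below:
#
#   import math
#   wallclock = math.fsum(t[0] for _rev, t in item)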
1895 totaltime = []
1902 totaltime = []
1896 for item in allresults:
1903 for item in allresults:
1897 totaltime.append((sum(x[1][0] for x in item),
1904 totaltime.append((sum(x[1][0] for x in item),
1898 sum(x[1][1] for x in item),
1905 sum(x[1][1] for x in item),
1899 sum(x[1][2] for x in item),)
1906 sum(x[1][2] for x in item),)
1900 )
1907 )
1901 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1908 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1902 displayall=displayall)
1909 displayall=displayall)
1903 fm.end()
1910 fm.end()
1904
1911
1905 class _faketr(object):
1912 class _faketr(object):
1906 def add(s, x, y, z=None):
1913 def add(s, x, y, z=None):
1907 return None
1914 return None
1908
1915
1909 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1916 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1910 lazydeltabase=True, clearcaches=True):
1917 lazydeltabase=True, clearcaches=True):
1911 timings = []
1918 timings = []
1912 tr = _faketr()
1919 tr = _faketr()
1913 with _temprevlog(ui, orig, startrev) as dest:
1920 with _temprevlog(ui, orig, startrev) as dest:
1914 dest._lazydeltabase = lazydeltabase
1921 dest._lazydeltabase = lazydeltabase
1915 revs = list(orig.revs(startrev, stoprev))
1922 revs = list(orig.revs(startrev, stoprev))
1916 total = len(revs)
1923 total = len(revs)
1917 topic = 'adding'
1924 topic = 'adding'
1918 if runidx is not None:
1925 if runidx is not None:
1919 topic += ' (run #%d)' % runidx
1926 topic += ' (run #%d)' % runidx
1920 # Support both old and new progress API
1927 # Support both old and new progress API
1921 if util.safehasattr(ui, 'makeprogress'):
1928 if util.safehasattr(ui, 'makeprogress'):
1922 progress = ui.makeprogress(topic, unit='revs', total=total)
1929 progress = ui.makeprogress(topic, unit='revs', total=total)
1923 def updateprogress(pos):
1930 def updateprogress(pos):
1924 progress.update(pos)
1931 progress.update(pos)
1925 def completeprogress():
1932 def completeprogress():
1926 progress.complete()
1933 progress.complete()
1927 else:
1934 else:
1928 def updateprogress(pos):
1935 def updateprogress(pos):
1929 ui.progress(topic, pos, unit='revs', total=total)
1936 ui.progress(topic, pos, unit='revs', total=total)
1930 def completeprogress():
1937 def completeprogress():
1931 ui.progress(topic, None, unit='revs', total=total)
1938 ui.progress(topic, None, unit='revs', total=total)
1932
1939
1933 for idx, rev in enumerate(revs):
1940 for idx, rev in enumerate(revs):
1934 updateprogress(idx)
1941 updateprogress(idx)
1935 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1942 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1936 if clearcaches:
1943 if clearcaches:
1937 dest.index.clearcaches()
1944 dest.index.clearcaches()
1938 dest.clearcaches()
1945 dest.clearcaches()
1939 with timeone() as r:
1946 with timeone() as r:
1940 dest.addrawrevision(*addargs, **addkwargs)
1947 dest.addrawrevision(*addargs, **addkwargs)
1941 timings.append((rev, r[0]))
1948 timings.append((rev, r[0]))
1942 updateprogress(total)
1949 updateprogress(total)
1943 completeprogress()
1950 completeprogress()
1944 return timings
1951 return timings
1945
1952
1946 def _getrevisionseed(orig, rev, tr, source):
1953 def _getrevisionseed(orig, rev, tr, source):
1947 from mercurial.node import nullid
1954 from mercurial.node import nullid
1948
1955
1949 linkrev = orig.linkrev(rev)
1956 linkrev = orig.linkrev(rev)
1950 node = orig.node(rev)
1957 node = orig.node(rev)
1951 p1, p2 = orig.parents(node)
1958 p1, p2 = orig.parents(node)
1952 flags = orig.flags(rev)
1959 flags = orig.flags(rev)
1953 cachedelta = None
1960 cachedelta = None
1954 text = None
1961 text = None
1955
1962
1956 if source == b'full':
1963 if source == b'full':
1957 text = orig.revision(rev)
1964 text = orig.revision(rev)
1958 elif source == b'parent-1':
1965 elif source == b'parent-1':
1959 baserev = orig.rev(p1)
1966 baserev = orig.rev(p1)
1960 cachedelta = (baserev, orig.revdiff(p1, rev))
1967 cachedelta = (baserev, orig.revdiff(p1, rev))
1961 elif source == b'parent-2':
1968 elif source == b'parent-2':
1962 parent = p2
1969 parent = p2
1963 if p2 == nullid:
1970 if p2 == nullid:
1964 parent = p1
1971 parent = p1
1965 baserev = orig.rev(parent)
1972 baserev = orig.rev(parent)
1966 cachedelta = (baserev, orig.revdiff(parent, rev))
1973 cachedelta = (baserev, orig.revdiff(parent, rev))
1967 elif source == b'parent-smallest':
1974 elif source == b'parent-smallest':
1968 p1diff = orig.revdiff(p1, rev)
1975 p1diff = orig.revdiff(p1, rev)
1969 parent = p1
1976 parent = p1
1970 diff = p1diff
1977 diff = p1diff
1971 if p2 != nullid:
1978 if p2 != nullid:
1972 p2diff = orig.revdiff(p2, rev)
1979 p2diff = orig.revdiff(p2, rev)
1973 if len(p1diff) > len(p2diff):
1980 if len(p1diff) > len(p2diff):
1974 parent = p2
1981 parent = p2
1975 diff = p2diff
1982 diff = p2diff
1976 baserev = orig.rev(parent)
1983 baserev = orig.rev(parent)
1977 cachedelta = (baserev, diff)
1984 cachedelta = (baserev, diff)
1978 elif source == b'storage':
1985 elif source == b'storage':
1979 baserev = orig.deltaparent(rev)
1986 baserev = orig.deltaparent(rev)
1980 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1987 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1981
1988
1982 return ((text, tr, linkrev, p1, p2),
1989 return ((text, tr, linkrev, p1, p2),
1983 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1990 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1984
1991
1985 @contextlib.contextmanager
1992 @contextlib.contextmanager
1986 def _temprevlog(ui, orig, truncaterev):
1993 def _temprevlog(ui, orig, truncaterev):
1987 from mercurial import vfs as vfsmod
1994 from mercurial import vfs as vfsmod
1988
1995
1989 if orig._inline:
1996 if orig._inline:
1990 raise error.Abort('not supporting inline revlog (yet)')
1997 raise error.Abort('not supporting inline revlog (yet)')
1991
1998
1992 origindexpath = orig.opener.join(orig.indexfile)
1999 origindexpath = orig.opener.join(orig.indexfile)
1993 origdatapath = orig.opener.join(orig.datafile)
2000 origdatapath = orig.opener.join(orig.datafile)
1994 indexname = 'revlog.i'
2001 indexname = 'revlog.i'
1995 dataname = 'revlog.d'
2002 dataname = 'revlog.d'
1996
2003
1997 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2004 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
1998 try:
2005 try:
1999 # copy the data file in a temporary directory
2006 # copy the data file in a temporary directory
2000 ui.debug('copying data in %s\n' % tmpdir)
2007 ui.debug('copying data in %s\n' % tmpdir)
2001 destindexpath = os.path.join(tmpdir, 'revlog.i')
2008 destindexpath = os.path.join(tmpdir, 'revlog.i')
2002 destdatapath = os.path.join(tmpdir, 'revlog.d')
2009 destdatapath = os.path.join(tmpdir, 'revlog.d')
2003 shutil.copyfile(origindexpath, destindexpath)
2010 shutil.copyfile(origindexpath, destindexpath)
2004 shutil.copyfile(origdatapath, destdatapath)
2011 shutil.copyfile(origdatapath, destdatapath)
2005
2012
2006 # remove the data we want to add again
2013 # remove the data we want to add again
2007 ui.debug('truncating data to be rewritten\n')
2014 ui.debug('truncating data to be rewritten\n')
2008 with open(destindexpath, 'ab') as index:
2015 with open(destindexpath, 'ab') as index:
2009 index.seek(0)
2016 index.seek(0)
2010 index.truncate(truncaterev * orig._io.size)
2017 index.truncate(truncaterev * orig._io.size)
2011 with open(destdatapath, 'ab') as data:
2018 with open(destdatapath, 'ab') as data:
2012 data.seek(0)
2019 data.seek(0)
2013 data.truncate(orig.start(truncaterev))
2020 data.truncate(orig.start(truncaterev))
2014
2021
2015 # instantiate a new revlog from the temporary copy
2022 # instantiate a new revlog from the temporary copy
2016 ui.debug('recreating revlog from the truncated copy\n')
2023 ui.debug('recreating revlog from the truncated copy\n')
2017 vfs = vfsmod.vfs(tmpdir)
2024 vfs = vfsmod.vfs(tmpdir)
2018 vfs.options = getattr(orig.opener, 'options', None)
2025 vfs.options = getattr(orig.opener, 'options', None)
2019
2026
2020 dest = revlog.revlog(vfs,
2027 dest = revlog.revlog(vfs,
2021 indexfile=indexname,
2028 indexfile=indexname,
2022 datafile=dataname)
2029 datafile=dataname)
2023 if dest._inline:
2030 if dest._inline:
2024 raise error.Abort('not supporting inline revlog (yet)')
2031 raise error.Abort('not supporting inline revlog (yet)')
2025 # make sure internals are initialized
2032 # make sure internals are initialized
2026 dest.revision(len(dest) - 1)
2033 dest.revision(len(dest) - 1)
2027 yield dest
2034 yield dest
2028 del dest, vfs
2035 del dest, vfs
2029 finally:
2036 finally:
2030 shutil.rmtree(tmpdir, True)
2037 shutil.rmtree(tmpdir, True)
2031
2038
2032 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2039 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2033 [(b'e', b'engines', b'', b'compression engines to use'),
2040 [(b'e', b'engines', b'', b'compression engines to use'),
2034 (b's', b'startrev', 0, b'revision to start at')],
2041 (b's', b'startrev', 0, b'revision to start at')],
2035 b'-c|-m|FILE')
2042 b'-c|-m|FILE')
2036 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2043 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2037 """Benchmark operations on revlog chunks.
2044 """Benchmark operations on revlog chunks.
2038
2045
2039 Logically, each revlog is a collection of fulltext revisions. However,
2046 Logically, each revlog is a collection of fulltext revisions. However,
2040 stored within each revlog are "chunks" of possibly compressed data. This
2047 stored within each revlog are "chunks" of possibly compressed data. This
2041 data needs to be read and decompressed or compressed and written.
2048 data needs to be read and decompressed or compressed and written.
2042
2049
2043 This command measures the time it takes to read+decompress and recompress
2050 This command measures the time it takes to read+decompress and recompress
2044 chunks in a revlog. It effectively isolates I/O and compression performance.
2051 chunks in a revlog. It effectively isolates I/O and compression performance.
2045 For measurements of higher-level operations like resolving revisions,
2052 For measurements of higher-level operations like resolving revisions,
2046 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2053 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2047 """
2054 """
2048 opts = _byteskwargs(opts)
2055 opts = _byteskwargs(opts)
2049
2056
2050 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2057 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2051
2058
2052 # _chunkraw was renamed to _getsegmentforrevs.
2059 # _chunkraw was renamed to _getsegmentforrevs.
2053 try:
2060 try:
2054 segmentforrevs = rl._getsegmentforrevs
2061 segmentforrevs = rl._getsegmentforrevs
2055 except AttributeError:
2062 except AttributeError:
2056 segmentforrevs = rl._chunkraw
2063 segmentforrevs = rl._chunkraw
2057
2064
2058 # Verify engines argument.
2065 # Verify engines argument.
2059 if engines:
2066 if engines:
2060 engines = set(e.strip() for e in engines.split(b','))
2067 engines = set(e.strip() for e in engines.split(b','))
2061 for engine in engines:
2068 for engine in engines:
2062 try:
2069 try:
2063 util.compressionengines[engine]
2070 util.compressionengines[engine]
2064 except KeyError:
2071 except KeyError:
2065 raise error.Abort(b'unknown compression engine: %s' % engine)
2072 raise error.Abort(b'unknown compression engine: %s' % engine)
2066 else:
2073 else:
2067 engines = []
2074 engines = []
2068 for e in util.compengines:
2075 for e in util.compengines:
2069 engine = util.compengines[e]
2076 engine = util.compengines[e]
2070 try:
2077 try:
2071 if engine.available():
2078 if engine.available():
2072 engine.revlogcompressor().compress(b'dummy')
2079 engine.revlogcompressor().compress(b'dummy')
2073 engines.append(e)
2080 engines.append(e)
2074 except NotImplementedError:
2081 except NotImplementedError:
2075 pass
2082 pass
2076
2083
2077 revs = list(rl.revs(startrev, len(rl) - 1))
2084 revs = list(rl.revs(startrev, len(rl) - 1))
2078
2085
2079 def rlfh(rl):
2086 def rlfh(rl):
2080 if rl._inline:
2087 if rl._inline:
2081 return getsvfs(repo)(rl.indexfile)
2088 return getsvfs(repo)(rl.indexfile)
2082 else:
2089 else:
2083 return getsvfs(repo)(rl.datafile)
2090 return getsvfs(repo)(rl.datafile)
2084
2091
2085 def doread():
2092 def doread():
2086 rl.clearcaches()
2093 rl.clearcaches()
2087 for rev in revs:
2094 for rev in revs:
2088 segmentforrevs(rev, rev)
2095 segmentforrevs(rev, rev)
2089
2096
2090 def doreadcachedfh():
2097 def doreadcachedfh():
2091 rl.clearcaches()
2098 rl.clearcaches()
2092 fh = rlfh(rl)
2099 fh = rlfh(rl)
2093 for rev in revs:
2100 for rev in revs:
2094 segmentforrevs(rev, rev, df=fh)
2101 segmentforrevs(rev, rev, df=fh)
2095
2102
2096 def doreadbatch():
2103 def doreadbatch():
2097 rl.clearcaches()
2104 rl.clearcaches()
2098 segmentforrevs(revs[0], revs[-1])
2105 segmentforrevs(revs[0], revs[-1])
2099
2106
2100 def doreadbatchcachedfh():
2107 def doreadbatchcachedfh():
2101 rl.clearcaches()
2108 rl.clearcaches()
2102 fh = rlfh(rl)
2109 fh = rlfh(rl)
2103 segmentforrevs(revs[0], revs[-1], df=fh)
2110 segmentforrevs(revs[0], revs[-1], df=fh)
2104
2111
2105 def dochunk():
2112 def dochunk():
2106 rl.clearcaches()
2113 rl.clearcaches()
2107 fh = rlfh(rl)
2114 fh = rlfh(rl)
2108 for rev in revs:
2115 for rev in revs:
2109 rl._chunk(rev, df=fh)
2116 rl._chunk(rev, df=fh)
2110
2117
2111 chunks = [None]
2118 chunks = [None]
2112
2119
2113 def dochunkbatch():
2120 def dochunkbatch():
2114 rl.clearcaches()
2121 rl.clearcaches()
2115 fh = rlfh(rl)
2122 fh = rlfh(rl)
2116 # Save chunks as a side-effect.
2123 # Save chunks as a side-effect.
2117 chunks[0] = rl._chunks(revs, df=fh)
2124 chunks[0] = rl._chunks(revs, df=fh)
2118
2125
2119 def docompress(compressor):
2126 def docompress(compressor):
2120 rl.clearcaches()
2127 rl.clearcaches()
2121
2128
2122 try:
2129 try:
2123 # Swap in the requested compression engine.
2130 # Swap in the requested compression engine.
2124 oldcompressor = rl._compressor
2131 oldcompressor = rl._compressor
2125 rl._compressor = compressor
2132 rl._compressor = compressor
2126 for chunk in chunks[0]:
2133 for chunk in chunks[0]:
2127 rl.compress(chunk)
2134 rl.compress(chunk)
2128 finally:
2135 finally:
2129 rl._compressor = oldcompressor
2136 rl._compressor = oldcompressor
2130
2137
2131 benches = [
2138 benches = [
2132 (lambda: doread(), b'read'),
2139 (lambda: doread(), b'read'),
2133 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2140 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2134 (lambda: doreadbatch(), b'read batch'),
2141 (lambda: doreadbatch(), b'read batch'),
2135 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2142 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2136 (lambda: dochunk(), b'chunk'),
2143 (lambda: dochunk(), b'chunk'),
2137 (lambda: dochunkbatch(), b'chunk batch'),
2144 (lambda: dochunkbatch(), b'chunk batch'),
2138 ]
2145 ]
2139
2146
2140 for engine in sorted(engines):
2147 for engine in sorted(engines):
2141 compressor = util.compengines[engine].revlogcompressor()
2148 compressor = util.compengines[engine].revlogcompressor()
2142 benches.append((functools.partial(docompress, compressor),
2149 benches.append((functools.partial(docompress, compressor),
2143 b'compress w/ %s' % engine))
2150 b'compress w/ %s' % engine))
2144
2151
2145 for fn, title in benches:
2152 for fn, title in benches:
2146 timer, fm = gettimer(ui, opts)
2153 timer, fm = gettimer(ui, opts)
2147 timer(fn, title=title)
2154 timer(fn, title=title)
2148 fm.end()
2155 fm.end()
2149
2156
2150 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2157 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2151 [(b'', b'cache', False, b'use caches instead of clearing')],
2158 [(b'', b'cache', False, b'use caches instead of clearing')],
2152 b'-c|-m|FILE REV')
2159 b'-c|-m|FILE REV')
2153 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2160 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2154 """Benchmark obtaining a revlog revision.
2161 """Benchmark obtaining a revlog revision.
2155
2162
2156 Obtaining a revlog revision consists of roughly the following steps:
2163 Obtaining a revlog revision consists of roughly the following steps:
2157
2164
2158 1. Compute the delta chain
2165 1. Compute the delta chain
2159 2. Slice the delta chain if applicable
2166 2. Slice the delta chain if applicable
2160 3. Obtain the raw chunks for that delta chain
2167 3. Obtain the raw chunks for that delta chain
2161 4. Decompress each raw chunk
2168 4. Decompress each raw chunk
2162 5. Apply binary patches to obtain fulltext
2169 5. Apply binary patches to obtain fulltext
2163 6. Verify hash of fulltext
2170 6. Verify hash of fulltext
2164
2171
2165 This command measures the time spent in each of these phases.
2172 This command measures the time spent in each of these phases.
2166 """
2173 """
2167 opts = _byteskwargs(opts)
2174 opts = _byteskwargs(opts)
2168
2175
2169 if opts.get(b'changelog') or opts.get(b'manifest'):
2176 if opts.get(b'changelog') or opts.get(b'manifest'):
2170 file_, rev = None, file_
2177 file_, rev = None, file_
2171 elif rev is None:
2178 elif rev is None:
2172 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2179 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2173
2180
2174 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2181 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2175
2182
2176 # _chunkraw was renamed to _getsegmentforrevs.
2183 # _chunkraw was renamed to _getsegmentforrevs.
2177 try:
2184 try:
2178 segmentforrevs = r._getsegmentforrevs
2185 segmentforrevs = r._getsegmentforrevs
2179 except AttributeError:
2186 except AttributeError:
2180 segmentforrevs = r._chunkraw
2187 segmentforrevs = r._chunkraw
2181
2188
2182 node = r.lookup(rev)
2189 node = r.lookup(rev)
2183 rev = r.rev(node)
2190 rev = r.rev(node)
2184
2191
2185 def getrawchunks(data, chain):
2192 def getrawchunks(data, chain):
2186 start = r.start
2193 start = r.start
2187 length = r.length
2194 length = r.length
2188 inline = r._inline
2195 inline = r._inline
2189 iosize = r._io.size
2196 iosize = r._io.size
2190 buffer = util.buffer
2197 buffer = util.buffer
2191
2198
2192 chunks = []
2199 chunks = []
2193 ladd = chunks.append
2200 ladd = chunks.append
2194 for idx, item in enumerate(chain):
2201 for idx, item in enumerate(chain):
2195 offset = start(item[0])
2202 offset = start(item[0])
2196 bits = data[idx]
2203 bits = data[idx]
2197 for rev in item:
2204 for rev in item:
2198 chunkstart = start(rev)
2205 chunkstart = start(rev)
2199 if inline:
2206 if inline:
2200 chunkstart += (rev + 1) * iosize
2207 chunkstart += (rev + 1) * iosize
2201 chunklength = length(rev)
2208 chunklength = length(rev)
2202 ladd(buffer(bits, chunkstart - offset, chunklength))
2209 ladd(buffer(bits, chunkstart - offset, chunklength))
2203
2210
2204 return chunks
2211 return chunks
2205
2212
2206 def dodeltachain(rev):
2213 def dodeltachain(rev):
2207 if not cache:
2214 if not cache:
2208 r.clearcaches()
2215 r.clearcaches()
2209 r._deltachain(rev)
2216 r._deltachain(rev)
2210
2217
2211 def doread(chain):
2218 def doread(chain):
2212 if not cache:
2219 if not cache:
2213 r.clearcaches()
2220 r.clearcaches()
2214 for item in slicedchain:
2221 for item in slicedchain:
2215 segmentforrevs(item[0], item[-1])
2222 segmentforrevs(item[0], item[-1])
2216
2223
2217 def doslice(r, chain, size):
2224 def doslice(r, chain, size):
2218 for s in slicechunk(r, chain, targetsize=size):
2225 for s in slicechunk(r, chain, targetsize=size):
2219 pass
2226 pass
2220
2227
2221 def dorawchunks(data, chain):
2228 def dorawchunks(data, chain):
2222 if not cache:
2229 if not cache:
2223 r.clearcaches()
2230 r.clearcaches()
2224 getrawchunks(data, chain)
2231 getrawchunks(data, chain)
2225
2232
2226 def dodecompress(chunks):
2233 def dodecompress(chunks):
2227 decomp = r.decompress
2234 decomp = r.decompress
2228 for chunk in chunks:
2235 for chunk in chunks:
2229 decomp(chunk)
2236 decomp(chunk)
2230
2237
2231 def dopatch(text, bins):
2238 def dopatch(text, bins):
2232 if not cache:
2239 if not cache:
2233 r.clearcaches()
2240 r.clearcaches()
2234 mdiff.patches(text, bins)
2241 mdiff.patches(text, bins)
2235
2242
2236 def dohash(text):
2243 def dohash(text):
2237 if not cache:
2244 if not cache:
2238 r.clearcaches()
2245 r.clearcaches()
2239 r.checkhash(text, node, rev=rev)
2246 r.checkhash(text, node, rev=rev)
2240
2247
2241 def dorevision():
2248 def dorevision():
2242 if not cache:
2249 if not cache:
2243 r.clearcaches()
2250 r.clearcaches()
2244 r.revision(node)
2251 r.revision(node)
2245
2252
2246 try:
2253 try:
2247 from mercurial.revlogutils.deltas import slicechunk
2254 from mercurial.revlogutils.deltas import slicechunk
2248 except ImportError:
2255 except ImportError:
2249 slicechunk = getattr(revlog, '_slicechunk', None)
2256 slicechunk = getattr(revlog, '_slicechunk', None)
2250
2257
2251 size = r.length(rev)
2258 size = r.length(rev)
2252 chain = r._deltachain(rev)[0]
2259 chain = r._deltachain(rev)[0]
2253 if not getattr(r, '_withsparseread', False):
2260 if not getattr(r, '_withsparseread', False):
2254 slicedchain = (chain,)
2261 slicedchain = (chain,)
2255 else:
2262 else:
2256 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2263 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2257 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2264 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2258 rawchunks = getrawchunks(data, slicedchain)
2265 rawchunks = getrawchunks(data, slicedchain)
2259 bins = r._chunks(chain)
2266 bins = r._chunks(chain)
2260 text = bytes(bins[0])
2267 text = bytes(bins[0])
2261 bins = bins[1:]
2268 bins = bins[1:]
2262 text = mdiff.patches(text, bins)
2269 text = mdiff.patches(text, bins)
2263
2270
2264 benches = [
2271 benches = [
2265 (lambda: dorevision(), b'full'),
2272 (lambda: dorevision(), b'full'),
2266 (lambda: dodeltachain(rev), b'deltachain'),
2273 (lambda: dodeltachain(rev), b'deltachain'),
2267 (lambda: doread(chain), b'read'),
2274 (lambda: doread(chain), b'read'),
2268 ]
2275 ]
2269
2276
2270 if getattr(r, '_withsparseread', False):
2277 if getattr(r, '_withsparseread', False):
2271 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2278 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2272 benches.append(slicing)
2279 benches.append(slicing)
2273
2280
2274 benches.extend([
2281 benches.extend([
2275 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2282 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2276 (lambda: dodecompress(rawchunks), b'decompress'),
2283 (lambda: dodecompress(rawchunks), b'decompress'),
2277 (lambda: dopatch(text, bins), b'patch'),
2284 (lambda: dopatch(text, bins), b'patch'),
2278 (lambda: dohash(text), b'hash'),
2285 (lambda: dohash(text), b'hash'),
2279 ])
2286 ])
2280
2287
2281 timer, fm = gettimer(ui, opts)
2288 timer, fm = gettimer(ui, opts)
2282 for fn, title in benches:
2289 for fn, title in benches:
2283 timer(fn, title=title)
2290 timer(fn, title=title)
2284 fm.end()
2291 fm.end()
2285
2292
2286 @command(b'perfrevset',
2293 @command(b'perfrevset',
2287 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2294 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2288 (b'', b'contexts', False, b'obtain changectx for each revision')]
2295 (b'', b'contexts', False, b'obtain changectx for each revision')]
2289 + formatteropts, b"REVSET")
2296 + formatteropts, b"REVSET")
2290 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2297 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2291 """benchmark the execution time of a revset
2298 """benchmark the execution time of a revset
2292
2299
2293 Use the --clear option if you need to evaluate the impact of building the
2300 Use the --clear option if you need to evaluate the impact of building the
2294 volatile revision set caches on revset execution. The volatile caches hold
2301 volatile revision set caches on revset execution. The volatile caches hold
2295 filtering- and obsolescence-related data."""
2302 filtering- and obsolescence-related data."""
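# Illustrative invocations (the revset expressions are only examples):
#
#   $ hg perfrevset 'draft()'
#   $ hg perfrevset 'heads(all())' --clear --contexts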
2296 opts = _byteskwargs(opts)
2303 opts = _byteskwargs(opts)
2297
2304
2298 timer, fm = gettimer(ui, opts)
2305 timer, fm = gettimer(ui, opts)
2299 def d():
2306 def d():
2300 if clear:
2307 if clear:
2301 repo.invalidatevolatilesets()
2308 repo.invalidatevolatilesets()
2302 if contexts:
2309 if contexts:
2303 for ctx in repo.set(expr): pass
2310 for ctx in repo.set(expr): pass
2304 else:
2311 else:
2305 for r in repo.revs(expr): pass
2312 for r in repo.revs(expr): pass
2306 timer(d)
2313 timer(d)
2307 fm.end()
2314 fm.end()
2308
2315
2309 @command(b'perfvolatilesets',
2316 @command(b'perfvolatilesets',
2310 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2317 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2311 ] + formatteropts)
2318 ] + formatteropts)
2312 def perfvolatilesets(ui, repo, *names, **opts):
2319 def perfvolatilesets(ui, repo, *names, **opts):
2313 """benchmark the computation of various volatile set
2320 """benchmark the computation of various volatile set
2314
2321
2315 Volatile set computes element related to filtering and obsolescence."""
2322 Volatile set computes element related to filtering and obsolescence."""
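# Illustrative invocations (set names such as 'obsolete' are examples; the
# available names come from obsolete.cachefuncs and repoview.filtertable):
#
#   $ hg perfvolatilesets
#   $ hg perfvolatilesets obsolete --clear-obsstore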
2316 opts = _byteskwargs(opts)
2323 opts = _byteskwargs(opts)
2317 timer, fm = gettimer(ui, opts)
2324 timer, fm = gettimer(ui, opts)
2318 repo = repo.unfiltered()
2325 repo = repo.unfiltered()
2319
2326
2320 def getobs(name):
2327 def getobs(name):
2321 def d():
2328 def d():
2322 repo.invalidatevolatilesets()
2329 repo.invalidatevolatilesets()
2323 if opts[b'clear_obsstore']:
2330 if opts[b'clear_obsstore']:
2324 clearfilecache(repo, b'obsstore')
2331 clearfilecache(repo, b'obsstore')
2325 obsolete.getrevs(repo, name)
2332 obsolete.getrevs(repo, name)
2326 return d
2333 return d
2327
2334
2328 allobs = sorted(obsolete.cachefuncs)
2335 allobs = sorted(obsolete.cachefuncs)
2329 if names:
2336 if names:
2330 allobs = [n for n in allobs if n in names]
2337 allobs = [n for n in allobs if n in names]
2331
2338
2332 for name in allobs:
2339 for name in allobs:
2333 timer(getobs(name), title=name)
2340 timer(getobs(name), title=name)
2334
2341
2335 def getfiltered(name):
2342 def getfiltered(name):
2336 def d():
2343 def d():
2337 repo.invalidatevolatilesets()
2344 repo.invalidatevolatilesets()
2338 if opts[b'clear_obsstore']:
2345 if opts[b'clear_obsstore']:
2339 clearfilecache(repo, b'obsstore')
2346 clearfilecache(repo, b'obsstore')
2340 repoview.filterrevs(repo, name)
2347 repoview.filterrevs(repo, name)
2341 return d
2348 return d
2342
2349
2343 allfilter = sorted(repoview.filtertable)
2350 allfilter = sorted(repoview.filtertable)
2344 if names:
2351 if names:
2345 allfilter = [n for n in allfilter if n in names]
2352 allfilter = [n for n in allfilter if n in names]
2346
2353
2347 for name in allfilter:
2354 for name in allfilter:
2348 timer(getfiltered(name), title=name)
2355 timer(getfiltered(name), title=name)
2349 fm.end()
2356 fm.end()
2350
2357
2351 @command(b'perfbranchmap',
2358 @command(b'perfbranchmap',
2352 [(b'f', b'full', False,
2359 [(b'f', b'full', False,
2353 b'Includes build time of subset'),
2360 b'Includes build time of subset'),
2354 (b'', b'clear-revbranch', False,
2361 (b'', b'clear-revbranch', False,
2355 b'purge the revbranch cache between computation'),
2362 b'purge the revbranch cache between computation'),
2356 ] + formatteropts)
2363 ] + formatteropts)
2357 def perfbranchmap(ui, repo, *filternames, **opts):
2364 def perfbranchmap(ui, repo, *filternames, **opts):
2358 """benchmark the update of a branchmap
2365 """benchmark the update of a branchmap
2359
2366
2360 This benchmarks the full repo.branchmap() call with read and write disabled
2367 This benchmarks the full repo.branchmap() call with read and write disabled
2361 """
2368 """
2362 opts = _byteskwargs(opts)
2369 opts = _byteskwargs(opts)
2363 full = opts.get(b"full", False)
2370 full = opts.get(b"full", False)
2364 clear_revbranch = opts.get(b"clear_revbranch", False)
2371 clear_revbranch = opts.get(b"clear_revbranch", False)
2365 timer, fm = gettimer(ui, opts)
2372 timer, fm = gettimer(ui, opts)
2366 def getbranchmap(filtername):
2373 def getbranchmap(filtername):
2367 """generate a benchmark function for the filtername"""
2374 """generate a benchmark function for the filtername"""
2368 if filtername is None:
2375 if filtername is None:
2369 view = repo
2376 view = repo
2370 else:
2377 else:
2371 view = repo.filtered(filtername)
2378 view = repo.filtered(filtername)
2372 def d():
2379 def d():
2373 if clear_revbranch:
2380 if clear_revbranch:
2374 repo.revbranchcache()._clear()
2381 repo.revbranchcache()._clear()
2375 if full:
2382 if full:
2376 view._branchcaches.clear()
2383 view._branchcaches.clear()
2377 else:
2384 else:
2378 view._branchcaches.pop(filtername, None)
2385 view._branchcaches.pop(filtername, None)
2379 view.branchmap()
2386 view.branchmap()
2380 return d
2387 return d
2381 # add filter in smaller subset to bigger subset
2388 # add filter in smaller subset to bigger subset
2382 possiblefilters = set(repoview.filtertable)
2389 possiblefilters = set(repoview.filtertable)
2383 if filternames:
2390 if filternames:
2384 possiblefilters &= set(filternames)
2391 possiblefilters &= set(filternames)
2385 subsettable = getbranchmapsubsettable()
2392 subsettable = getbranchmapsubsettable()
2386 allfilters = []
2393 allfilters = []
2387 while possiblefilters:
2394 while possiblefilters:
2388 for name in possiblefilters:
2395 for name in possiblefilters:
2389 subset = subsettable.get(name)
2396 subset = subsettable.get(name)
2390 if subset not in possiblefilters:
2397 if subset not in possiblefilters:
2391 break
2398 break
2392 else:
2399 else:
2393 assert False, b'subset cycle %s!' % possiblefilters
2400 assert False, b'subset cycle %s!' % possiblefilters
2394 allfilters.append(name)
2401 allfilters.append(name)
2395 possiblefilters.remove(name)
2402 possiblefilters.remove(name)
2396
2403
2397 # warm the cache
2404 # warm the cache
2398 if not full:
2405 if not full:
2399 for name in allfilters:
2406 for name in allfilters:
2400 repo.filtered(name).branchmap()
2407 repo.filtered(name).branchmap()
2401 if not filternames or b'unfiltered' in filternames:
2408 if not filternames or b'unfiltered' in filternames:
2402 # add unfiltered
2409 # add unfiltered
2403 allfilters.append(None)
2410 allfilters.append(None)
2404
2411
2405 branchcacheread = safeattrsetter(branchmap, b'read')
2412 branchcacheread = safeattrsetter(branchmap, b'read')
2406 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2413 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2407 branchcacheread.set(lambda repo: None)
2414 branchcacheread.set(lambda repo: None)
2408 branchcachewrite.set(lambda bc, repo: None)
2415 branchcachewrite.set(lambda bc, repo: None)
2409 try:
2416 try:
2410 for name in allfilters:
2417 for name in allfilters:
2411 printname = name
2418 printname = name
2412 if name is None:
2419 if name is None:
2413 printname = b'unfiltered'
2420 printname = b'unfiltered'
2414 timer(getbranchmap(name), title=str(printname))
2421 timer(getbranchmap(name), title=str(printname))
2415 finally:
2422 finally:
2416 branchcacheread.restore()
2423 branchcacheread.restore()
2417 branchcachewrite.restore()
2424 branchcachewrite.restore()
2418 fm.end()
2425 fm.end()
2419
2426
2420 @command(b'perfbranchmapupdate', [
2427 @command(b'perfbranchmapupdate', [
2421 (b'', b'base', [], b'subset of revision to start from'),
2428 (b'', b'base', [], b'subset of revision to start from'),
2422 (b'', b'target', [], b'subset of revision to end with'),
2429 (b'', b'target', [], b'subset of revision to end with'),
2423 (b'', b'clear-caches', False, b'clear caches between each run')
2430 (b'', b'clear-caches', False, b'clear caches between each run')
2424 ] + formatteropts)
2431 ] + formatteropts)
2425 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2432 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2426 """benchmark branchmap update from for <base> revs to <target> revs
2433 """benchmark branchmap update from for <base> revs to <target> revs
2427
2434
2428 If `--clear-caches` is passed, the following items will be reset before
2435 If `--clear-caches` is passed, the following items will be reset before
2429 each update:
2436 each update:
2430 * the changelog instance and associated indexes
2437 * the changelog instance and associated indexes
2431 * the rev-branch-cache instance
2438 * the rev-branch-cache instance
2432
2439
2433 Examples:
2440 Examples:
2434
2441
2435 # update for the one last revision
2442 # update for the one last revision
2436 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2443 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2437
2444
2438 # update for a change coming with a new branch
2445 # update for a change coming with a new branch
2439 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2446 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2440 """
2447 """
2441 from mercurial import branchmap
2448 from mercurial import branchmap
2442 from mercurial import repoview
2449 from mercurial import repoview
2443 opts = _byteskwargs(opts)
2450 opts = _byteskwargs(opts)
2444 timer, fm = gettimer(ui, opts)
2451 timer, fm = gettimer(ui, opts)
2445 clearcaches = opts[b'clear_caches']
2452 clearcaches = opts[b'clear_caches']
2446 unfi = repo.unfiltered()
2453 unfi = repo.unfiltered()
2447 x = [None] # used to pass data between closure
2454 x = [None] # used to pass data between closure
2448
2455
2449 # we use a `list` here to avoid possible side effect from smartset
2456 # we use a `list` here to avoid possible side effect from smartset
2450 baserevs = list(scmutil.revrange(repo, base))
2457 baserevs = list(scmutil.revrange(repo, base))
2451 targetrevs = list(scmutil.revrange(repo, target))
2458 targetrevs = list(scmutil.revrange(repo, target))
2452 if not baserevs:
2459 if not baserevs:
2453 raise error.Abort(b'no revisions selected for --base')
2460 raise error.Abort(b'no revisions selected for --base')
2454 if not targetrevs:
2461 if not targetrevs:
2455 raise error.Abort(b'no revisions selected for --target')
2462 raise error.Abort(b'no revisions selected for --target')
2456
2463
2457 # make sure the target branchmap also contains the one in the base
2464 # make sure the target branchmap also contains the one in the base
2458 targetrevs = list(set(baserevs) | set(targetrevs))
2465 targetrevs = list(set(baserevs) | set(targetrevs))
2459 targetrevs.sort()
2466 targetrevs.sort()
2460
2467
2461 cl = repo.changelog
2468 cl = repo.changelog
2462 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2469 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2463 allbaserevs.sort()
2470 allbaserevs.sort()
2464 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2471 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2465
2472
2466 newrevs = list(alltargetrevs.difference(allbaserevs))
2473 newrevs = list(alltargetrevs.difference(allbaserevs))
2467 newrevs.sort()
2474 newrevs.sort()
2468
2475
2469 allrevs = frozenset(unfi.changelog.revs())
2476 allrevs = frozenset(unfi.changelog.revs())
2470 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2477 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2471 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2478 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2472
2479
2473 def basefilter(repo, visibilityexceptions=None):
2480 def basefilter(repo, visibilityexceptions=None):
2474 return basefilterrevs
2481 return basefilterrevs
2475
2482
2476 def targetfilter(repo, visibilityexceptions=None):
2483 def targetfilter(repo, visibilityexceptions=None):
2477 return targetfilterrevs
2484 return targetfilterrevs
2478
2485
2479 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2486 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2480 ui.status(msg % (len(allbaserevs), len(newrevs)))
2487 ui.status(msg % (len(allbaserevs), len(newrevs)))
2481 if targetfilterrevs:
2488 if targetfilterrevs:
2482 msg = b'(%d revisions still filtered)\n'
2489 msg = b'(%d revisions still filtered)\n'
2483 ui.status(msg % len(targetfilterrevs))
2490 ui.status(msg % len(targetfilterrevs))
2484
2491
2485 try:
2492 try:
2486 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2493 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2487 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2494 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2488
2495
2489 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2496 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2490 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2497 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2491
2498
2492 # try to find an existing branchmap to reuse
2499 # try to find an existing branchmap to reuse
2493 subsettable = getbranchmapsubsettable()
2500 subsettable = getbranchmapsubsettable()
2494 candidatefilter = subsettable.get(None)
2501 candidatefilter = subsettable.get(None)
2495 while candidatefilter is not None:
2502 while candidatefilter is not None:
2496 candidatebm = repo.filtered(candidatefilter).branchmap()
2503 candidatebm = repo.filtered(candidatefilter).branchmap()
2497 if candidatebm.validfor(baserepo):
2504 if candidatebm.validfor(baserepo):
2498 filtered = repoview.filterrevs(repo, candidatefilter)
2505 filtered = repoview.filterrevs(repo, candidatefilter)
2499 missing = [r for r in allbaserevs if r in filtered]
2506 missing = [r for r in allbaserevs if r in filtered]
2500 base = candidatebm.copy()
2507 base = candidatebm.copy()
2501 base.update(baserepo, missing)
2508 base.update(baserepo, missing)
2502 break
2509 break
2503 candidatefilter = subsettable.get(candidatefilter)
2510 candidatefilter = subsettable.get(candidatefilter)
2504 else:
2511 else:
2505 # no suitable subset was found
2512 # no suitable subset was found
2506 base = branchmap.branchcache()
2513 base = branchmap.branchcache()
2507 base.update(baserepo, allbaserevs)
2514 base.update(baserepo, allbaserevs)
2508
2515
2509 def setup():
2516 def setup():
2510 x[0] = base.copy()
2517 x[0] = base.copy()
2511 if clearcaches:
2518 if clearcaches:
2512 unfi._revbranchcache = None
2519 unfi._revbranchcache = None
2513 clearchangelog(repo)
2520 clearchangelog(repo)
2514
2521
2515 def bench():
2522 def bench():
2516 x[0].update(targetrepo, newrevs)
2523 x[0].update(targetrepo, newrevs)
2517
2524
2518 timer(bench, setup=setup)
2525 timer(bench, setup=setup)
2519 fm.end()
2526 fm.end()
2520 finally:
2527 finally:
2521 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2528 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2522 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2529 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2523
2530
2524 @command(b'perfbranchmapload', [
2531 @command(b'perfbranchmapload', [
2525 (b'f', b'filter', b'', b'Specify repoview filter'),
2532 (b'f', b'filter', b'', b'Specify repoview filter'),
2526 (b'', b'list', False, b'List branchmap filter caches'),
2533 (b'', b'list', False, b'List branchmap filter caches'),
2527 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2534 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2528
2535
2529 ] + formatteropts)
2536 ] + formatteropts)
2530 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2537 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2531 """benchmark reading the branchmap"""
2538 """benchmark reading the branchmap"""
2532 opts = _byteskwargs(opts)
2539 opts = _byteskwargs(opts)
2533 clearrevlogs = opts[b'clear_revlogs']
2540 clearrevlogs = opts[b'clear_revlogs']
2534
2541
2535 if list:
2542 if list:
2536 for name, kind, st in repo.cachevfs.readdir(stat=True):
2543 for name, kind, st in repo.cachevfs.readdir(stat=True):
2537 if name.startswith(b'branch2'):
2544 if name.startswith(b'branch2'):
2538 filtername = name.partition(b'-')[2] or b'unfiltered'
2545 filtername = name.partition(b'-')[2] or b'unfiltered'
2539 ui.status(b'%s - %s\n'
2546 ui.status(b'%s - %s\n'
2540 % (filtername, util.bytecount(st.st_size)))
2547 % (filtername, util.bytecount(st.st_size)))
2541 return
2548 return
2542 if not filter:
2549 if not filter:
2543 filter = None
2550 filter = None
2544 subsettable = getbranchmapsubsettable()
2551 subsettable = getbranchmapsubsettable()
2545 if filter is None:
2552 if filter is None:
2546 repo = repo.unfiltered()
2553 repo = repo.unfiltered()
2547 else:
2554 else:
2548 repo = repoview.repoview(repo, filter)
2555 repo = repoview.repoview(repo, filter)
2549
2556
2550 repo.branchmap() # make sure we have a relevant, up to date branchmap
2557 repo.branchmap() # make sure we have a relevant, up to date branchmap
2551
2558
2552 currentfilter = filter
2559 currentfilter = filter
2553 # try once without timer, the filter may not be cached
2560 # try once without timer, the filter may not be cached
2554 while branchmap.read(repo) is None:
2561 while branchmap.read(repo) is None:
2555 currentfilter = subsettable.get(currentfilter)
2562 currentfilter = subsettable.get(currentfilter)
2556 if currentfilter is None:
2563 if currentfilter is None:
2557 raise error.Abort(b'No branchmap cached for %s repo'
2564 raise error.Abort(b'No branchmap cached for %s repo'
2558 % (filter or b'unfiltered'))
2565 % (filter or b'unfiltered'))
2559 repo = repo.filtered(currentfilter)
2566 repo = repo.filtered(currentfilter)
2560 timer, fm = gettimer(ui, opts)
2567 timer, fm = gettimer(ui, opts)
2561 def setup():
2568 def setup():
2562 if clearrevlogs:
2569 if clearrevlogs:
2563 clearchangelog(repo)
2570 clearchangelog(repo)
2564 def bench():
2571 def bench():
2565 branchmap.read(repo)
2572 branchmap.read(repo)
2566 timer(bench, setup=setup)
2573 timer(bench, setup=setup)
2567 fm.end()
2574 fm.end()
2568
2575
2569 @command(b'perfloadmarkers')
2576 @command(b'perfloadmarkers')
2570 def perfloadmarkers(ui, repo):
2577 def perfloadmarkers(ui, repo):
2571 """benchmark the time to parse the on-disk markers for a repo
2578 """benchmark the time to parse the on-disk markers for a repo
2572
2579
2573 Result is the number of markers in the repo."""
2580 Result is the number of markers in the repo."""
2574 timer, fm = gettimer(ui)
2581 timer, fm = gettimer(ui)
2575 svfs = getsvfs(repo)
2582 svfs = getsvfs(repo)
2576 timer(lambda: len(obsolete.obsstore(svfs)))
2583 timer(lambda: len(obsolete.obsstore(svfs)))
2577 fm.end()
2584 fm.end()
2578
2585
2579 @command(b'perflrucachedict', formatteropts +
2586 @command(b'perflrucachedict', formatteropts +
2580 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2587 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2581 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2588 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2582 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2589 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2583 (b'', b'size', 4, b'size of cache'),
2590 (b'', b'size', 4, b'size of cache'),
2584 (b'', b'gets', 10000, b'number of key lookups'),
2591 (b'', b'gets', 10000, b'number of key lookups'),
2585 (b'', b'sets', 10000, b'number of key sets'),
2592 (b'', b'sets', 10000, b'number of key sets'),
2586 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2593 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2587 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2594 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2588 norepo=True)
2595 norepo=True)
2589 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2596 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2590 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2597 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
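# Illustrative invocations (values are arbitrary; the command needs no
# repository):
#
#   $ hg perflrucachedict --size 4 --gets 10000 --sets 10000
#   $ hg perflrucachedict --costlimit 500 --mixed 10000 --mixedgetfreq 75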
2591 opts = _byteskwargs(opts)
2598 opts = _byteskwargs(opts)
2592
2599
2593 def doinit():
2600 def doinit():
2594 for i in _xrange(10000):
2601 for i in _xrange(10000):
2595 util.lrucachedict(size)
2602 util.lrucachedict(size)
2596
2603
2597 costrange = list(range(mincost, maxcost + 1))
2604 costrange = list(range(mincost, maxcost + 1))
2598
2605
2599 values = []
2606 values = []
2600 for i in _xrange(size):
2607 for i in _xrange(size):
2601 values.append(random.randint(0, _maxint))
2608 values.append(random.randint(0, _maxint))
2602
2609
2603 # Get mode fills the cache and tests raw lookup performance with no
2610 # Get mode fills the cache and tests raw lookup performance with no
2604 # eviction.
2611 # eviction.
2605 getseq = []
2612 getseq = []
2606 for i in _xrange(gets):
2613 for i in _xrange(gets):
2607 getseq.append(random.choice(values))
2614 getseq.append(random.choice(values))
2608
2615
2609 def dogets():
2616 def dogets():
2610 d = util.lrucachedict(size)
2617 d = util.lrucachedict(size)
2611 for v in values:
2618 for v in values:
2612 d[v] = v
2619 d[v] = v
2613 for key in getseq:
2620 for key in getseq:
2614 value = d[key]
2621 value = d[key]
2615 value # silence pyflakes warning
2622 value # silence pyflakes warning
2616
2623
2617 def dogetscost():
2624 def dogetscost():
2618 d = util.lrucachedict(size, maxcost=costlimit)
2625 d = util.lrucachedict(size, maxcost=costlimit)
2619 for i, v in enumerate(values):
2626 for i, v in enumerate(values):
2620 d.insert(v, v, cost=costs[i])
2627 d.insert(v, v, cost=costs[i])
2621 for key in getseq:
2628 for key in getseq:
2622 try:
2629 try:
2623 value = d[key]
2630 value = d[key]
2624 value # silence pyflakes warning
2631 value # silence pyflakes warning
2625 except KeyError:
2632 except KeyError:
2626 pass
2633 pass
2627
2634
2628 # Set mode tests insertion speed with cache eviction.
2635 # Set mode tests insertion speed with cache eviction.
2629 setseq = []
2636 setseq = []
2630 costs = []
2637 costs = []
2631 for i in _xrange(sets):
2638 for i in _xrange(sets):
2632 setseq.append(random.randint(0, _maxint))
2639 setseq.append(random.randint(0, _maxint))
2633 costs.append(random.choice(costrange))
2640 costs.append(random.choice(costrange))
2634
2641
2635 def doinserts():
2642 def doinserts():
2636 d = util.lrucachedict(size)
2643 d = util.lrucachedict(size)
2637 for v in setseq:
2644 for v in setseq:
2638 d.insert(v, v)
2645 d.insert(v, v)
2639
2646
2640 def doinsertscost():
2647 def doinsertscost():
2641 d = util.lrucachedict(size, maxcost=costlimit)
2648 d = util.lrucachedict(size, maxcost=costlimit)
2642 for i, v in enumerate(setseq):
2649 for i, v in enumerate(setseq):
2643 d.insert(v, v, cost=costs[i])
2650 d.insert(v, v, cost=costs[i])
2644
2651
2645 def dosets():
2652 def dosets():
2646 d = util.lrucachedict(size)
2653 d = util.lrucachedict(size)
2647 for v in setseq:
2654 for v in setseq:
2648 d[v] = v
2655 d[v] = v
2649
2656
2650 # Mixed mode randomly performs gets and sets with eviction.
2657 # Mixed mode randomly performs gets and sets with eviction.
2651 mixedops = []
2658 mixedops = []
2652 for i in _xrange(mixed):
2659 for i in _xrange(mixed):
2653 r = random.randint(0, 100)
2660 r = random.randint(0, 100)
2654 if r < mixedgetfreq:
2661 if r < mixedgetfreq:
2655 op = 0
2662 op = 0
2656 else:
2663 else:
2657 op = 1
2664 op = 1
2658
2665
2659 mixedops.append((op,
2666 mixedops.append((op,
2660 random.randint(0, size * 2),
2667 random.randint(0, size * 2),
2661 random.choice(costrange)))
2668 random.choice(costrange)))
2662
2669
2663 def domixed():
2670 def domixed():
2664 d = util.lrucachedict(size)
2671 d = util.lrucachedict(size)
2665
2672
2666 for op, v, cost in mixedops:
2673 for op, v, cost in mixedops:
2667 if op == 0:
2674 if op == 0:
2668 try:
2675 try:
2669 d[v]
2676 d[v]
2670 except KeyError:
2677 except KeyError:
2671 pass
2678 pass
2672 else:
2679 else:
2673 d[v] = v
2680 d[v] = v
2674
2681
2675 def domixedcost():
2682 def domixedcost():
2676 d = util.lrucachedict(size, maxcost=costlimit)
2683 d = util.lrucachedict(size, maxcost=costlimit)
2677
2684
2678 for op, v, cost in mixedops:
2685 for op, v, cost in mixedops:
2679 if op == 0:
2686 if op == 0:
2680 try:
2687 try:
2681 d[v]
2688 d[v]
2682 except KeyError:
2689 except KeyError:
2683 pass
2690 pass
2684 else:
2691 else:
2685 d.insert(v, v, cost=cost)
2692 d.insert(v, v, cost=cost)
2686
2693
2687 benches = [
2694 benches = [
2688 (doinit, b'init'),
2695 (doinit, b'init'),
2689 ]
2696 ]
2690
2697
2691 if costlimit:
2698 if costlimit:
2692 benches.extend([
2699 benches.extend([
2693 (dogetscost, b'gets w/ cost limit'),
2700 (dogetscost, b'gets w/ cost limit'),
2694 (doinsertscost, b'inserts w/ cost limit'),
2701 (doinsertscost, b'inserts w/ cost limit'),
2695 (domixedcost, b'mixed w/ cost limit'),
2702 (domixedcost, b'mixed w/ cost limit'),
2696 ])
2703 ])
2697 else:
2704 else:
2698 benches.extend([
2705 benches.extend([
2699 (dogets, b'gets'),
2706 (dogets, b'gets'),
2700 (doinserts, b'inserts'),
2707 (doinserts, b'inserts'),
2701 (dosets, b'sets'),
2708 (dosets, b'sets'),
2702 (domixed, b'mixed')
2709 (domixed, b'mixed')
2703 ])
2710 ])
2704
2711
2705 for fn, title in benches:
2712 for fn, title in benches:
2706 timer, fm = gettimer(ui, opts)
2713 timer, fm = gettimer(ui, opts)
2707 timer(fn, title=title)
2714 timer(fn, title=title)
2708 fm.end()
2715 fm.end()
2709
2716
2710 @command(b'perfwrite', formatteropts)
2717 @command(b'perfwrite', formatteropts)
2711 def perfwrite(ui, repo, **opts):
2718 def perfwrite(ui, repo, **opts):
2712 """microbenchmark ui.write
2719 """microbenchmark ui.write
2713 """
2720 """
2714 opts = _byteskwargs(opts)
2721 opts = _byteskwargs(opts)
2715
2722
2716 timer, fm = gettimer(ui, opts)
2723 timer, fm = gettimer(ui, opts)
2717 def write():
2724 def write():
2718 for i in range(100000):
2725 for i in range(100000):
2719 ui.write((b'Testing write performance\n'))
2726 ui.write((b'Testing write performance\n'))
2720 timer(write)
2727 timer(write)
2721 fm.end()
2728 fm.end()
2722
2729
2723 def uisetup(ui):
2730 def uisetup(ui):
2724 if (util.safehasattr(cmdutil, b'openrevlog') and
2731 if (util.safehasattr(cmdutil, b'openrevlog') and
2725 not util.safehasattr(commands, b'debugrevlogopts')):
2732 not util.safehasattr(commands, b'debugrevlogopts')):
2726 # for "historical portability":
2733 # for "historical portability":
2727 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2734 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2728 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2735 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2729 # openrevlog() should cause failure, because it has been
2736 # openrevlog() should cause failure, because it has been
2730 # available since 3.5 (or 49c583ca48c4).
2737 # available since 3.5 (or 49c583ca48c4).
2731 def openrevlog(orig, repo, cmd, file_, opts):
2738 def openrevlog(orig, repo, cmd, file_, opts):
2732 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2739 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2733 raise error.Abort(b"This version doesn't support --dir option",
2740 raise error.Abort(b"This version doesn't support --dir option",
2734 hint=b"use 3.5 or later")
2741 hint=b"use 3.5 or later")
2735 return orig(repo, cmd, file_, opts)
2742 return orig(repo, cmd, file_, opts)
2736 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2743 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2737
2744
2738 @command(b'perfprogress', formatteropts + [
2745 @command(b'perfprogress', formatteropts + [
2739 (b'', b'topic', b'topic', b'topic for progress messages'),
2746 (b'', b'topic', b'topic', b'topic for progress messages'),
2740 (b'c', b'total', 1000000, b'total value we are progressing to'),
2747 (b'c', b'total', 1000000, b'total value we are progressing to'),
2741 ], norepo=True)
2748 ], norepo=True)
2742 def perfprogress(ui, topic=None, total=None, **opts):
2749 def perfprogress(ui, topic=None, total=None, **opts):
2743 """printing of progress bars"""
2750 """printing of progress bars"""
2744 opts = _byteskwargs(opts)
2751 opts = _byteskwargs(opts)
2745
2752
2746 timer, fm = gettimer(ui, opts)
2753 timer, fm = gettimer(ui, opts)
2747
2754
2748 def doprogress():
2755 def doprogress():
2749 with ui.makeprogress(topic, total=total) as progress:
2756 with ui.makeprogress(topic, total=total) as progress:
2750 for i in pycompat.xrange(total):
2757 for i in pycompat.xrange(total):
2751 progress.increment()
2758 progress.increment()
2752
2759
2753 timer(doprogress)
2760 timer(doprogress)
2754 fm.end()
2761 fm.end()